/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#ifdef CONFIG_PPC32
# define GET_ADDR(addr) addr
#else
/*
 * On PPC64 a function symbol points to a function descriptor;
 * the first word of the descriptor is the actual entry address.
 */
# define GET_ADDR(addr) (*(unsigned long *)addr)
#endif
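/*
 * For illustration only (ELFv1 ABI assumption): the descriptor that
 * GET_ADDR() dereferences is three doublewords,
 *
 *	0:	entry	- code address (the word GET_ADDR() loads)
 *	8:	toc	- TOC pointer (r2) the callee expects
 *	16:	env	- environment pointer, unused by C code
 *
 * so a single load from the function symbol yields the branch target.
 */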
#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int ftrace_nop_replace(void)
{
	return PPC_INST_NOP;
}

static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = GET_ADDR(addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}
#ifdef CONFIG_PPC64
# define _ASM_ALIGN	" .align 3 "
# define _ASM_PTR	" .llong "
#else
# define _ASM_ALIGN	" .align 2 "
# define _ASM_PTR	" .long "
#endif
static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change. We need to protect against faulting as well as code
	 * changing, which we do by using the probe_kernel_* functions.
	 *
	 * No real locking is needed; this code runs via stop_machine(),
	 * or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old)
		return -EINVAL;

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
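/*
 * Usage note: callers below pass the instruction they expect at the
 * call site as "old" (e.g. the recorded "bl _mcount") and its
 * replacement as "new" (e.g. PPC_INST_NOP), so an unexpected
 * instruction is reported as -EINVAL rather than silently overwritten.
 */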
/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	/* use create_branch() to verify that this offset can be branched */
	return create_branch((unsigned int *)ip, addr, 0);
}
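/*
 * Background: an unconditional relative branch encodes a 24-bit field
 * shifted left by two, i.e. a 26-bit signed byte displacement, so the
 * target must lie within +/-32MB of ip. create_branch() returns 0 for
 * an out-of-range target, which is why its result doubles as a boolean.
 */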
#ifdef CONFIG_MODULES

static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
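/*
 * Worked example with a hypothetical opcode: op == 0x4bfffff5 is
 * "bl .-12". It matches is_bl_op() ((op & 0xfc000003) == 0x48000001),
 * its offset field is 0x03fffff4, and since bit 0x02000000 is set it
 * sign-extends to -12, so find_bl_target() returns ip - 12.
 */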
#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[5];
	unsigned long ptr;
	unsigned long ip = rec->ip;
	unsigned long tramp;
	int offset;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that this is still a 24-bit branch */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);
	/*
	 * On PPC64 the trampoline looks like:
	 * 0x3d, 0x82, 0x00, 0x00,	addis	r12,r2,<high>
	 * 0x39, 0x8c, 0x00, 0x00,	addi	r12,r12,<low>
	 *   Where the bytes 2, 3, 6 and 7 make up the 32-bit offset
	 *   from the TOC to the entry that holds the pointer to jump to.
	 * 0xf8, 0x41, 0x00, 0x28,	std	r2,40(r1)
	 *   Saves the caller's TOC in its stack frame.
	 * 0xe9, 0x6c, 0x00, 0x20,	ld	r11,32(r12)
	 *   The actual entry address lives 32 bytes past that offset
	 *   into the TOC.
	 * 0xe8, 0x4c, 0x00, 0x28,	ld	r2,40(r12)
	 *   And the callee's TOC value 40 bytes past it.
	 */
	pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0xf8410028) ||
	    (jmp[3] != 0xe96c0020) ||
	    (jmp[4] != 0xe84c0028)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	/* the bottom half is sign extended, so add, don't or, the halves */
	offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
		(int)((short)jmp[1]);
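	/*
	 * Worked example, hypothetical immediates: with jmp[0] ending in
	 * 0x0001 and jmp[1] ending in 0x8030, offset becomes
	 * (0x0001 << 16) + (int)(short)0x8030 = 0x10000 - 0x7fd0 = 0x8030.
	 * The high half was pre-incremented at link time to cancel the
	 * sign extension of the low half (the usual @ha/@l convention).
	 */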
	pr_devel(" %x ", offset);

	/* get the address this jumps to */
	tramp = mod->arch.toc + offset + 32;
	pr_devel("toc: %lx", tramp);

	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x\n", jmp[0], jmp[1]);

	ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
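	/*
	 * Note: reassembling the 64-bit descriptor entry as
	 * (jmp[0] << 32) + jmp[1] treats the first word read as the most
	 * significant half, i.e. it assumes a big-endian layout.
	 */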
	/* This should match what was called */
	if (ptr != GET_ADDR(addr)) {
		printk(KERN_ERR "addr does not match %lx\n", ptr);
		return -EINVAL;
	}
	/*
	 * We want to nop the line, but the next line is
	 *  0xe8, 0x41, 0x00, 0x28	ld r2,40(r1)
	 * This needs to be turned into a nop too.
	 */
	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (op != 0xe8410028) {
		printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
		return -EINVAL;
	}

	/*
	 * Milton Miller pointed out that we can not blindly do nops.
	 * If a task was preempted while calling the trace function, the
	 * nops would remove the instruction that restores the TOC in r2,
	 * and the r2 TOC would be left corrupted.
	 */

	/*
	 * So instead, replace:
	 *  bl <tramp>	<==== will be replaced with "b 1f"
	 *  ld r2,40(r1)
	 * 1:
	 */
	op = 0x48000008;	/* b +8 */
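	/*
	 * Encoding note: 0x48000008 is primary opcode 18 (unconditional
	 * branch) with a byte displacement of 8 and AA = LK = 0, i.e.
	 * "b .+8", which skips over the ld r2,40(r1) without touching it.
	 */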
	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}

#else /* !CONFIG_PPC64 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24-bit branch */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);
	/*
	 * On PPC32 the trampoline looks like:
	 *  0x3d, 0x60, 0x00, 0x00	lis r11,sym@ha
	 *  0x39, 0x6b, 0x00, 0x00	addi r11,r11,sym@l
	 *  0x7d, 0x69, 0x03, 0xa6	mtctr r11
	 *  0x4e, 0x80, 0x04, 0x20	bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);
	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
	    ((jmp[1] & 0xffff0000) != 0x396b0000) ||
	    (jmp[2] != 0x7d6903a6) ||
	    (jmp[3] != 0x4e800420)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);
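	/*
	 * Same @ha/@l convention as the 64-bit side: addi sign-extends
	 * sym@l, so sym@ha was pre-incremented when the low half is
	 * negative and the raw reassembly can be 0x10000 too high. With
	 * hypothetical halves ha = 0x0001 and l = 0x8030, the OR yields
	 * 0x18030 and the correction above brings it back to 0x8030.
	 */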
300 "Trampoline location %08lx does not match addr\n",
307 if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
310 flush_icache_range(ip, ip + 8);
315 #endif /* CONFIG_MODULES */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ftrace_nop_replace();
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			printk(KERN_ERR "No module loaded addr=%lx\n",
			       addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			printk(KERN_ERR
			       "Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
		return -EFAULT;

	/*
	 * It should be pointing to two nops or
	 *  b +8; ld r2,40(r1)
	 */
	if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
	    ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) {
		printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op[0] = create_branch((unsigned int *)ip,
			      rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op[0]) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	/* ld r2,40(r1), the TOC restore the "b +8" used to skip */
	op[1] = 0xe8410028;

	pr_devel("write to %lx\n", rec->ip);

	if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		printk(KERN_ERR "Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_nop_replace();
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Since we are converting from a nop, the record had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		printk(KERN_ERR "No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* caller expects data to be zero */
	unsigned long *p = data;

	*p = 0;

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);
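/*
 * ftrace_graph_call is a patch site in the ftrace_caller assembly whose
 * branch targets ftrace_graph_stub while the graph tracer is off.
 * Enabling patches that branch to point at ftrace_graph_caller, and
 * disabling patches it back; both use a plain "b" (link == 0), as this
 * is a redirect rather than a call.
 */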
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_PPC64
extern void mod_return_to_handler(void);
#endif

/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;
#ifdef CONFIG_PPC64
	/* non-core kernel code (e.g. modules) must save and restore the TOC */
	if (REGION_ID(self_addr) != KERNEL_REGION_ID)
		return_hooker = (unsigned long)&mod_return_to_handler;
#endif

	return_hooker = GET_ADDR(return_hooker);
	/*
	 * Protect against a fault, even though it shouldn't
	 * happen. This tool is too intrusive to go without
	 * such a protection.
	 */
	asm volatile(
		"1: " PPC_LL "%[old], 0(%[parent])\n"
		"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
		"   li %[faulted], 0\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: li %[faulted], 1\n"
		"   b 3b\n"
		".previous\n"

		".section __ex_table,\"a\"\n"
			PPC_LONG "1b,4b\n"
			PPC_LONG "2b,4b\n"
		".previous"

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);
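	/*
	 * Each __ex_table entry pairs a possibly-faulting access (the
	 * load at 1: or the store at 2:) with the fixup at 4:, which
	 * sets faulted and branches back to 3:; a bad parent pointer is
	 * thus reported below instead of oopsing.
	 */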
	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */