/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <trace/syscall.h>
#include <asm/lowcore.h>

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

#define FTRACE_INSN_SIZE 4
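
/*
 * FTRACE_INSN_SIZE is the size of the single instruction that gets
 * patched at run time, while MCOUNT_INSN_SIZE covers the complete
 * mcount call sequence emitted at every call site. The templates below
 * provide the code that is copied over that sequence.
 */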
31 "ftrace_disable_code:\n"
34 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
36 "ftrace_disable_return:\n"
44 " j .+"__stringify(MCOUNT_INSN_SIZE)"\n");
49 " stg %r14,8(%r15)\n");

#else /* CONFIG_64BIT */

asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	0f\n"
	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	"	basr	%r14,%r1\n"
	"ftrace_disable_return:\n"
	"	l	%r14,4(%r15)\n"
	"	j	0f\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"0:\n");

asm(
	"	.align	4\n"
	"ftrace_nop_code:\n"
	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
	"	.align	4\n"
	"ftrace_call_code:\n"
	"	st	%r14,4(%r15)\n");

#endif /* CONFIG_64BIT */

static int ftrace_modify_code(unsigned long ip,
			      void *old_code, int old_size,
			      void *new_code, int new_size)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules, code can disappear and change.
	 * We need to protect against faulting as well as code
	 * changing. We do this by using the probe_kernel_*
	 * functions.
	 * This however is just a simple sanity check.
	 */
	if (probe_kernel_read(replaced, (void *)ip, old_size))
		return -EFAULT;
	if (memcmp(replaced, old_code, old_size) != 0)
		return -EINVAL;
	if (probe_kernel_write((void *)ip, new_code, new_size))
		return -EPERM;
	return 0;
}
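
/*
 * A call site starts out with the code the compiler generated for the
 * mcount call; its first instruction matches ftrace_call_code. The
 * initial conversion replaces the whole sequence with the
 * ftrace_disable_code template, after which only the first instruction
 * is flipped between ftrace_nop_code and ftrace_call_code to disable
 * and enable tracing of the function.
 */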
static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
				   unsigned long addr)
{
	return ftrace_modify_code(rec->ip,
				  ftrace_call_code, FTRACE_INSN_SIZE,
				  ftrace_disable_code, MCOUNT_INSN_SIZE);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (addr == MCOUNT_ADDR)
		return ftrace_make_initial_nop(mod, rec, addr);
	return ftrace_modify_code(rec->ip,
				  ftrace_call_code, FTRACE_INSN_SIZE,
				  ftrace_nop_code, FTRACE_INSN_SIZE);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return ftrace_modify_code(rec->ip,
				  ftrace_nop_code, FTRACE_INSN_SIZE,
				  ftrace_call_code, FTRACE_INSN_SIZE);
}
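
/*
 * Called by the ftrace core whenever the tracer callback changes.
 * The address is kept in ftrace_dyn_func (declared outside this file)
 * for the low-level mcount/ftrace code to pick up.
 */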
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_dyn_func = (unsigned long)func;
	return 0;
}
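
/*
 * Nothing to prepare here; the ftrace core also reads the value stored
 * in *data as a return code, so writing zero reports success.
 */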
int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *)data = 0;
	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at the ftrace_graph_caller location:
 * the instruction there is a branch relative on condition. The condition mask
 * is either all ones (always branch, i.e. disable ftrace_graph_caller) or all
 * zeroes (nop, i.e. enable ftrace_graph_caller).
 * The instruction format for brc is a7m4xxxx, where m is the condition mask.
 */
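
/*
 * Only the first halfword of the brc instruction needs to be rewritten:
 *
 *	0xa704	brc	0,...	condition mask 0: never branch (nop),
 *				so ftrace_graph_caller is executed
 *	0xa7f4	brc	15,...	condition mask 15: always branch,
 *				so ftrace_graph_caller is skipped
 */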
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short opcode = 0xa704;

	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned short opcode = 0xa7f4;

	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - (ftrace_disable_return - ftrace_disable_code);
}

#else /* CONFIG_DYNAMIC_FTRACE */

static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - MCOUNT_OFFSET_RET;
}

#endif /* CONFIG_DYNAMIC_FTRACE */
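
/*
 * Both variants above map the return address that the mcount code hands
 * to prepare_ftrace_return() back to the start of the mcount call
 * sequence, so that trace.func points at the traced function rather
 * than at the return point inside the sequence.
 */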

/*
 * Hook the return address and push it in the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
{
	struct ftrace_graph_ent trace;

	/* NMIs are currently unsupported. */
	if (unlikely(in_nmi()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
		goto out;
	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long)return_to_handler;
out:
	return parent;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned int sys_call_table[];

static struct syscall_metadata **syscalls_metadata;
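
/*
 * syscalls_metadata is indexed by syscall number and filled by
 * arch_init_ftrace_syscalls() below; entries for system calls without
 * metadata stay NULL, which syscall_nr_to_meta() passes on unchanged.
 */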

struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}
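
/*
 * Look up the metadata entry for a syscall by the symbol name of its
 * handler. The comparison skips the first three characters of both
 * names, presumably so that the "sys_" versus "SyS_" spelling of the
 * syscall wrapper symbols does not prevent a match.
 */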
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}
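
/*
 * Build the syscall number -> metadata table. The static reference
 * counter ensures that only the first caller allocates and fills the
 * table.
 */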
void arch_init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	int i;
	static atomic_t refs;

	if (atomic_inc_return(&refs) != 1)
		goto out;
	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
				    GFP_KERNEL);
	if (!syscalls_metadata)
		goto out;
	for (i = 0; i < NR_syscalls; i++) {
		meta = find_syscall_meta((unsigned long)sys_call_table[i]);
		syscalls_metadata[i] = meta;