/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/nmi.h>


#ifdef CONFIG_DYNAMIC_FTRACE

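/*
 * Called by the ftrace core around a batch of code modifications:
 * kernel text is made writable while patching and restored to
 * read-only afterwards.
 */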
int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_kernel_text_ro();
	return 0;
}

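/*
 * The union below overlays the five bytes of an x86 near call so the
 * same buffer can be viewed either as raw instruction bytes or as the
 * 0xe8 opcode followed by its 32-bit relative offset:
 *
 *	e8 <rel32>	call <target>
 */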
union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

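/*
 * Build the call instruction that replaces the mcount slot at @ip;
 * the offset is relative to the end of the 5-byte instruction.
 */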
static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8		= 0xe8;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: An IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Wait for any running NMIs to finish and set a flag that says
 *    we are modifying code; this is done in one atomic operation.
 * 3) Write the code.
 * 4) Clear the flag.
 * 5) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */

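/*
 * nmi_running does double duty: its low bits count the CPUs currently
 * inside an NMI handler, while bit 31 (MOD_CODE_WRITE_FLAG) signals
 * that a modification is pending and the NMI path must do the write
 * itself.
 */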
#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status;		/* holds return value of text write */
static void *mod_code_ip;		/* holds the IP to write to */
static void *mod_code_newcode;		/* holds the text to write to the IP */

static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);

int ftrace_arch_read_dyn_info(char *buf, int size)
{
	int r;

	r = snprintf(buf, size, "%u %u",
		     nmi_wait_count,
		     atomic_read(&nmi_update_count));
	return r;
}

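/*
 * Clear only MOD_CODE_WRITE_FLAG while preserving the NMI count; a
 * cmpxchg loop is needed because NMIs may be updating nmi_running
 * concurrently.
 */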
static void clear_mod_flag(void)
{
	int old = atomic_read(&nmi_running);

	for (;;) {
		int new = old & ~MOD_CODE_WRITE_FLAG;

		if (old == new)
			break;

		old = atomic_cmpxchg(&nmi_running, old, new);
	}
}

static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU process can be writing to mod_code_status.
	 * (and the code itself)
	 * But if one were to fail, then they all should, and if one were
	 * to succeed, then they all should.
	 */
	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);

	/* if we fail, then kill any new writers */
	if (mod_code_status)
		clear_mod_flag();
}

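/*
 * NMI handlers call ftrace_nmi_enter()/ftrace_nmi_exit() around their
 * work. If a modification is pending, the NMI performs the write itself
 * before running any code that might be in the middle of being patched,
 * and bumps nmi_update_count for ftrace_arch_read_dyn_info().
 */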
void ftrace_nmi_enter(void)
{
	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
		smp_rmb();
		ftrace_mod_code();
		atomic_inc(&nmi_update_count);
	}
	/* Must have previous changes seen before executions */
	smp_mb();
}

void ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing nmi_running */
	smp_mb();
	atomic_dec(&nmi_running);
}

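/*
 * Atomically set MOD_CODE_WRITE_FLAG, but only once no NMI is running;
 * otherwise spin until the in-flight NMIs have finished.
 */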
static void wait_for_nmi_and_set_mod_flag(void)
{
	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
		return;

	do {
		cpu_relax();
	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));

	nmi_wait_count++;
}

static void wait_for_nmi(void)
{
	if (!atomic_read(&nmi_running))
		return;

	do {
		cpu_relax();
	} while (atomic_read(&nmi_running));

	nmi_wait_count++;
}

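/*
 * do_ftrace_mod_code() implements steps 1-5 of the protocol described
 * above: publish the buffers, set the modify flag once no NMI is in
 * flight, write the code, clear the flag and wait for any late NMIs.
 */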
static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_mb();

	wait_for_nmi_and_set_mod_flag();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_mb();

	clear_mod_flag();
	wait_for_nmi();

	return mod_code_status;
}


static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

static unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, we need to protect against faulting
	 * as well as code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}

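/*
 * ftrace_make_nop()/ftrace_make_call() switch the mcount call site at
 * rec->ip between a call to @addr and the nop selected at boot,
 * verifying the existing bytes before patching.
 */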
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}

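/*
 * Point the call at the ftrace_call site (in the ftrace_caller
 * trampoline) to the currently registered tracer function.
 */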
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * fall back to a less efficient 5 byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi-part nop
	 * since we would then risk being preempted in the middle
	 * of that nop, and if we enabled tracing then, it might
	 * cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
	asm volatile (
		"ftrace_test_jmp:"
		"jmp ftrace_test_p6nop\n"
		"nop\n"
		"nop\n"
		"nop\n"  /* 2 byte jmp + 3 bytes */
		"ftrace_test_p6nop:"
		P6_NOP5
		"jmp 1f\n"
		"ftrace_test_nop5:"
		".byte 0x66,0x66,0x66,0x66,0x90\n"
		"1:"
		".section .fixup, \"ax\"\n"
		"2:	movl $1, %0\n"
		"	jmp ftrace_test_nop5\n"
		"3:	movl $2, %0\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
		_ASM_EXTABLE(ftrace_test_nop5, 3b)
		: "=r"(faulted) : "0" (faulted));

	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
		break;
	}

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

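/*
 * Rewrite the rel32 target of the 5-byte jmp (opcode 0xe9) at @ip,
 * after verifying that its current offset matches @old_offset.
 */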
static int ftrace_mod_jmp(unsigned long ip,
			  int old_offset, int new_offset)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
		return -EINVAL;

	*(int *)(&code[1]) = new_offset;

	if (do_ftrace_mod_code(ip, &code))
		return -EPERM;

	return 0;
}

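/*
 * Enabling/disabling the graph tracer amounts to retargeting the jmp
 * at ftrace_graph_call between ftrace_graph_caller and ftrace_stub.
 */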
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}

#endif /* !CONFIG_DYNAMIC_FTRACE */

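/*
 * Note: @parent points at the stack slot that holds the traced
 * function's return address; the asm below saves the original value
 * and redirects the slot to return_to_handler.
 */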
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	/* NMIs are currently unsupported */
	if (unlikely(in_nmi()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned long *sys_call_table;

static struct syscall_metadata **syscalls_metadata;

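/*
 * Map a sys_call_table entry back to its compiled-in syscall metadata
 * by resolving the handler's symbol name via kallsyms and matching it
 * against the metadata section.
 */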
static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];


	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		if (start->name && !strcmp(start->name, str))
			return start;
	}
	return NULL;
}

struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

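/*
 * Build the syscall-number -> metadata table once; the static refcount
 * turns any later call into a no-op.
 */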
void arch_init_ftrace_syscalls(void)
{
	int i;
	struct syscall_metadata *meta;
	unsigned long **psys_syscall_table = &sys_call_table;
	static atomic_t refs;

	if (atomic_inc_return(&refs) != 1)
		goto end;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					FTRACE_SYSCALL_MAX, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return;
	}

	for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
		meta = find_syscall_meta(psys_syscall_table[i]);
		syscalls_metadata[i] = meta;
	}
	return;

	/* Paranoid: avoid overflow */
end:
	atomic_dec(&refs);
}
#endif