/*
 *  Kernel Probes (KProbes)
 *  arch/ppc64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for ppc64.
 */

#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/sstep.h>

static DECLARE_MUTEX(kprobe_mutex);

static struct kprobe *current_kprobe;
static unsigned long kprobe_status, kprobe_saved_msr;
static struct kprobe *kprobe_prev;
static unsigned long kprobe_status_prev, kprobe_saved_msr_prev;
static struct pt_regs jprobe_saved_regs;

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
		printk("Cannot register a kprobe on rfid or mtmsrd\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64 */
	if (!ret) {
		down(&kprobe_mutex);
		p->ainsn.insn = get_insn_slot();
		up(&kprobe_mutex);
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}
	return ret;
}

void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	down(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn);
	up(&kprobe_mutex);
}
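
/*
 * Illustrative sketch (not part of the original file): a minimal module
 * showing how the arch_* hooks above get exercised.  register_kprobe()
 * leads to arch_prepare_kprobe()/arch_copy_kprobe() and then
 * arch_arm_kprobe(); unregister_kprobe() leads to arch_disarm_kprobe()
 * and arch_remove_kprobe().  The probed symbol ".do_fork" (text symbols
 * carry a leading dot on ppc64) and the handler names are assumptions for
 * this sketch, and it assumes kallsyms_lookup_name() is available to the
 * module.  Kept under #if 0 so it is never built as part of this file.
 */
#if 0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

/* runs just before the probed instruction is single-stepped */
static int sample_pre_handler(struct kprobe *kp, struct pt_regs *regs)
{
	printk("kprobe hit: nip=0x%lx\n", regs->nip);
	return 0;
}

static struct kprobe sample_kp = {
	.pre_handler = sample_pre_handler,
};

static int __init sample_kprobe_init(void)
{
	/* probe addresses must be word-aligned instruction addresses */
	sample_kp.addr = (kprobe_opcode_t *)kallsyms_lookup_name(".do_fork");
	if (!sample_kp.addr)
		return -EINVAL;
	return register_kprobe(&sample_kp);
}

static void __exit sample_kprobe_exit(void)
{
	unregister_kprobe(&sample_kp);
}

module_init(sample_kprobe_init);
module_exit(sample_kprobe_exit);
MODULE_LICENSE("GPL");
#endif
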
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = *p->ainsn.insn;

	/* enable hardware single-stepping */
	regs->msr |= MSR_SE;

	/* single step inline if it is a trap variant */
	if (is_trap(insn))
		regs->nip = (unsigned long)p->addr;
	else
		regs->nip = (unsigned long)p->ainsn.insn;
}

static inline void save_previous_kprobe(void)
{
	kprobe_prev = current_kprobe;
	kprobe_status_prev = kprobe_status;
	kprobe_saved_msr_prev = kprobe_saved_msr;
}

static inline void restore_previous_kprobe(void)
{
	current_kprobe = kprobe_prev;
	kprobe_status = kprobe_status_prev;
	kprobe_saved_msr = kprobe_saved_msr_prev;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *)regs->link;

		/* Replace the return addr with trampoline addr */
		regs->link = (unsigned long)kretprobe_trampoline;
		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}
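
/*
 * Illustrative sketch (not part of the original file): a return probe
 * built on arch_prepare_kretprobe() above.  On function entry the link
 * register is swapped with kretprobe_trampoline, so the handler below
 * runs when the probed function returns.  The symbol ".do_fork", the
 * handler name and the maxactive value are assumptions for this sketch;
 * kallsyms_lookup_name() is assumed available.  Kept under #if 0.
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

/* called on return from the probed function; r3 holds the return value */
static int sample_ret_handler(struct kretprobe_instance *ri,
			      struct pt_regs *regs)
{
	printk("probed function returned 0x%lx\n", regs->gpr[3]);
	return 0;
}

static struct kretprobe sample_rp = {
	.handler   = sample_ret_handler,
	.maxactive = 16,	/* concurrent instances; excess hits bump nmissed */
};

static int __init sample_kretprobe_init(void)
{
	sample_rp.kp.addr = (kprobe_opcode_t *)kallsyms_lookup_name(".do_fork");
	if (!sample_rp.kp.addr)
		return -EINVAL;
	return register_kretprobe(&sample_rp);
}

static void __exit sample_kretprobe_exit(void)
{
	unregister_kretprobe(&sample_rp);
	printk("missed %d probe instances\n", sample_rp.nmissed);
}

module_init(sample_kretprobe_init);
module_exit(sample_kretprobe_exit);
MODULE_LICENSE("GPL");
#endif
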
static inline int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		/* We *are* holding the lock here, so this is safe.
		   Disarm the probe we just hit, and ignore it. */
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				regs->msr &= ~MSR_SE;
				regs->msr |= kprobe_saved_msr;
				unlock_kprobes();
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe();
			current_kprobe = p;
			kprobe_saved_msr = regs->msr;
			p->nmissed++;
			prepare_singlestep(p, regs);
			kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = current_kprobe;
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		/* If it's not ours, it can't be a delete race (we hold the lock). */
		goto no_kprobe;
	}

	lock_kprobes();
	p = get_kprobe(addr);
	if (!p) {
		unlock_kprobes();
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else.
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kprobe_status = KPROBE_HIT_ACTIVE;
	current_kprobe = p;
	kprobe_saved_msr = regs->msr;
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	/*
	 * This preempt_disable() matches the preempt_enable_no_resched()
	 * in post_kprobe_handler().
	 */
	preempt_disable();
	return 1;

no_kprobe:
	return ret;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
			"kretprobe_trampoline:\n"
			"nop\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
	regs->nip = orig_ret_address;

	unlock_kprobes();

	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we have handled unlocking
	 * and re-enabling preemption.
	 */
	return 1;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	regs->nip = (unsigned long)p->addr;
	ret = emulate_step(regs, insn);
	if (ret == 0)
		regs->nip = (unsigned long)p->addr + 4;
}

static inline int post_kprobe_handler(struct pt_regs *regs)
{
	if (!kprobe_running())
		return 0;

	if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
		kprobe_status = KPROBE_HIT_SSDONE;
		current_kprobe->post_handler(current_kprobe, regs, 0);
	}

	resume_execution(current_kprobe, regs);
	regs->msr |= kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe();
		goto out;
	}
	unlock_kprobes();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, MSR
	 * will have SE set, in which case continue the remaining processing
	 * of do_debug, as if this were not a probe hit.
	 */
	if (regs->msr & MSR_SE)
		return 0;

	return 1;
}

/* Interrupts disabled, kprobe_lock held. */
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	if (current_kprobe->fault_handler
	    && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
		return 1;

	if (kprobe_status & KPROBE_HIT_SS) {
		resume_execution(current_kprobe, regs);
		regs->msr &= ~MSR_SE;
		regs->msr |= kprobe_saved_msr;

		unlock_kprobes();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	/*
	 * Interrupts are not disabled here.  We need to disable
	 * preemption, because kprobe_running() uses smp_processor_id().
	 */
	preempt_disable();
	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	preempt_enable_no_resched();
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* set up the return addr to the jprobe handler routine */
	regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);

	return 1;
}
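
/*
 * Illustrative sketch (not part of the original file): a jprobe built on
 * setjmp_pre_handler() above, which redirects nip and the TOC pointer (r2)
 * through the handler's function descriptor so the handler sees the same
 * arguments as the probed function.  The handler must mirror the probed
 * function's prototype and finish with jprobe_return().  The probed symbol
 * ".do_fork" and the prototype below are assumptions for this sketch;
 * kallsyms_lookup_name() is assumed available.  Kept under #if 0.
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

/* must have the same signature as the probed function */
static long sample_jdo_fork(unsigned long clone_flags, unsigned long stack_start,
			    struct pt_regs *regs, unsigned long stack_size,
			    int __user *parent_tidptr, int __user *child_tidptr)
{
	printk("do_fork entered: clone_flags=0x%lx\n", clone_flags);

	/* always end a jprobe handler here; control never goes past it */
	jprobe_return();
	return 0;
}

static struct jprobe sample_jp = {
	.entry = JPROBE_ENTRY(sample_jdo_fork),
};

static int __init sample_jprobe_init(void)
{
	sample_jp.kp.addr = (kprobe_opcode_t *)kallsyms_lookup_name(".do_fork");
	if (!sample_jp.kp.addr)
		return -EINVAL;
	return register_jprobe(&sample_jp);
}

static void __exit sample_jprobe_exit(void)
{
	unregister_jprobe(&sample_jp);
}

module_init(sample_jprobe_init);
module_exit(sample_jprobe_exit);
MODULE_LICENSE("GPL");
#endif
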
void __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}

void __kprobes jprobe_return_end(void)
{
};

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	/*
	 * FIXME - we should ideally be validating that we got here because
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs.
	 */
	memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
	return 1;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}