/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#ifdef CONFIG_BOOKE
#define MSR_SINGLESTEP	(MSR_DE)	/* BookE parts single-step via Debug Enable */
#else
#define MSR_SINGLESTEP	(MSR_SE)	/* classic parts use Single-step Enable */
#endif
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64.  This is
	 * not explicitly required on ppc32 (right now), but it doesn't hurt */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}
	if (!ret) {
		p->opcode = *p->addr;	/* save original insn for disarm */
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	}

	p->ainsn.boostable = 0;
	return ret;
}
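
/*
 * Illustrative sketch (not part of this file): arch_prepare_kprobe() is
 * driven by the generic register_kprobe() path.  A minimal module that
 * exercises it might look like the following; the probed symbol and the
 * handler names are hypothetical.
 */
#if 0	/* example only */
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* Runs from kprobe_handler() before the probed insn executes */
	printk(KERN_INFO "kprobe hit at %p, nip=%lx\n", p->addr, regs->nip);
	return 0;	/* 0: continue with single-step/emulation */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* hypothetical target */
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	/* Fails (e.g. -EINVAL) if arch_prepare_kprobe() rejects the insn */
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif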
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	/* put back the instruction saved by arch_prepare_kprobe() */
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, 0);
	mutex_unlock(&kprobe_mutex);
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	/* We turn off async exceptions to ensure that the single step will
	 * be for the instruction we have the probe on; if we don't, it's
	 * possible we'd get the single step reported for an exception handler
	 * like Decrementer or External Interrupt */
	regs->msr &= ~MSR_EE;
	regs->msr |= MSR_SINGLESTEP;
#ifdef CONFIG_BOOKE
	regs->msr &= ~MSR_CE;
	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#endif

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant as values in regs could play a part in
	 * if the trap is taken or not
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}
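
/*
 * Note (added for clarity): once prepare_singlestep() returns, the copy
 * at p->ainsn.insn executes exactly one instruction and raises a
 * single-step (debug) exception, which comes back to this file through
 * kprobe_exceptions_notify() -> post_kprobe_handler() below.
 */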
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_msr = regs->msr;
}
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}
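
/*
 * Illustrative sketch (not part of this file): the hijacked link
 * register above is what makes return probes work.  A module using the
 * generic kretprobe API might look like this; the target symbol and the
 * handler name are hypothetical.
 */
#if 0	/* example only */
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* On powerpc, gpr[3] carries the function's return value */
	printk(KERN_INFO "returned to %p, r3=%lx\n",
			ri->ret_addr, regs->gpr[3]);
	return 0;
}

static struct kretprobe example_rp = {
	.handler	= example_ret,
	.maxactive	= 16,	/* concurrent instances to pre-allocate */
	.kp.symbol_name	= "do_fork",	/* hypothetical target */
};

static int __init example_init(void)
{
	return register_kretprobe(&example_rp);
}

static void __exit example_exit(void)
{
	unregister_kretprobe(&example_rp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif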
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				/* Turn off 'trace' bits */
				regs->msr &= ~MSR_SINGLESTEP;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_saved_msr = regs->msr;
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If trap variant, then it belongs not to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit; no further
				 * handling of this interrupt is appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable >= 0) {
		unsigned int insn = *p->ainsn.insn;

		/* regs->nip is also adjusted if emulate_step returns 1 */
		ret = emulate_step(regs, insn);
		if (ret > 0) {
			/*
			 * Once this instruction has been boosted
			 * successfully, set the boostable flag
			 */
			if (unlikely(p->ainsn.boostable == 0))
				p->ainsn.boostable = 1;

			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		} else if (ret < 0) {
			/*
			 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
			 * So, we should never get here... but, it's still
			 * good to catch them, just in case...
			 */
			printk("Can't step on instruction %x\n", insn);
			BUG();
		} else if (ret == 0)
			/* This instruction can't be boosted */
			p->ainsn.boostable = -1;
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
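
/*
 * Note (added for clarity): the ss_probe path above implements
 * "boosting": when emulate_step() can model the copied instruction in
 * software, the single-step exception round trip is skipped entirely.
 * ainsn.boostable is a tri-state: 0 = not yet tried, 1 = emulation
 * known to work, -1 = must be hardware single-stepped.
 */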
/*
 * Function return probe trampoline:
 * 	- init_kprobes() establishes a probepoint here
 * 	- When the probed function returns, this probe
 * 		causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
			"kretprobe_trampoline:\n"
			"nop\n");
}
/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
						struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->nip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	regs->nip = (unsigned long)p->addr;
	ret = emulate_step(regs, insn);
	if (ret == 0)
		/* not emulated: step past the 4-byte instruction */
		regs->nip = (unsigned long)p->addr + 4;
}
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	/* make sure we got here for an instruction we have a kprobe on */
	if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, msr
	 * will have DE/SE set, in which case continue the remaining
	 * processing of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SINGLESTEP)
		return 0;

	return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the nip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault; this could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = entry->fixup;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
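
/*
 * Illustrative sketch (not part of this file): the fault_handler
 * consulted above is supplied per-probe by the user.  A hypothetical
 * module-side handler:
 */
#if 0	/* example only */
static int example_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
	/*
	 * Return 1 to claim the fault was handled here; return 0 to let
	 * the exception-table fixup and do_page_fault() run as above.
	 */
	return 0;
}

/* wired up as:  example_kp.fault_handler = example_fault;  */
#endif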
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
unsigned long arch_deref_entry_point(void *entry)
{
	return ((func_descr_t *)entry)->entry;
}
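
/*
 * Note (added for clarity): under the ppc64 ELF ABI a function symbol
 * names a descriptor rather than code, which is why the cast above
 * works.  Sketch of the layout (see func_descr_t in <asm/types.h>):
 */
#if 0	/* illustration only */
typedef struct {
	unsigned long entry;	/* address of the first instruction */
	unsigned long toc;	/* TOC base to load into r2 */
	unsigned long env;	/* environment pointer (unused by C) */
} func_descr_t;
#endif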
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = arch_deref_entry_point(jp->entry);
#ifdef CONFIG_PPC64
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

	return 1;
}
void __used __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}

static void __used __kprobes jprobe_return_end(void)
{
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}
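
/*
 * Illustrative sketch (not part of this file): a jprobe pairs
 * setjmp_pre_handler()/longjmp_break_handler() above with a handler
 * that shares the probed function's prototype and ends in
 * jprobe_return().  Symbol, prototype and handler names below are
 * hypothetical; register with register_jprobe(&example_jp).
 */
#if 0	/* example only */
#include <linux/module.h>
#include <linux/kprobes.h>

/* must mirror the probed function's prototype to see its arguments */
static long jexample(unsigned long clone_flags, unsigned long stack_start,
		     struct pt_regs *regs, unsigned long stack_size,
		     int __user *parent_tidptr, int __user *child_tidptr)
{
	printk(KERN_INFO "clone_flags=%lx\n", clone_flags);
	jprobe_return();	/* mandatory: never return normally */
	return 0;		/* not reached */
}

static struct jprobe example_jp = {
	.entry		= jexample,
	.kp.symbol_name	= "do_fork",	/* hypothetical target */
};
#endif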
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}