/*
 *  Kernel Probes (KProbes)
 *  arch/i386/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/uaccess.h>
void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/* Insert a relative jmp instruction at 'from', jumping to 'to'. */
static inline void set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		long raddr;
	} __attribute__((packed)) *jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (long)(to) - ((long)(from) + 5);
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
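/*
 * Worked example (illustrative addresses): for from == 0xc0100000 and
 * to == 0xc0100010, raddr is 0xc0100010 - (0xc0100000 + 5) = 0x0b --
 * the displacement is taken from the end of the 5-byte "jmp rel32"
 * instruction, exactly as the CPU computes it.
 */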
/*
 * returns non-zero if the opcode can be boosted.
 */
static inline int can_boost(kprobe_opcode_t opcode)
{
	switch (opcode & 0xf0) {
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0x90:
		/* can't boost call and pushf */
		return opcode != 0x9a && opcode != 0x9c;
	case 0xc0:
		/* can't boost undefined opcodes and software interrupts */
		return (0xc1 < opcode && opcode < 0xc6) ||
			(0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and (maybe) jmps */
		return (0xe3 < opcode && opcode != 0xe8);
	case 0xf0:
		/* clear and set flags can be boosted */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* currently, can't boost two-byte opcodes */
		return opcode != 0x0f;
	}
}
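/*
 * For example (illustrative): can_boost(0x50) is non-zero -- "push
 * %eax" falls through to the default case and is not the 0x0f escape
 * byte -- while can_boost(0xe8) is zero, because a relative call
 * pushes a return address that must be fixed up after single-stepping
 * the copied instruction.
 */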
/*
 * returns non-zero if the opcode modifies the interrupt flag.
 */
static inline int is_IF_modifier(kprobe_opcode_t opcode)
{
	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}
	return 0;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on i386. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	if (can_boost(p->opcode)) {
		p->ainsn.boostable = 0;
	} else {
		p->ainsn.boostable = -1;
	}
	return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn);
	mutex_unlock(&kprobe_mutex);
}
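/*
 * Example usage (a minimal sketch, not part of this file; the probed
 * address and all "my_*" names are hypothetical).  Registering a
 * kprobe from a module ends up calling arch_prepare_kprobe() and
 * arch_arm_kprobe() above:
 *
 *	#include <linux/kernel.h>
 *	#include <linux/module.h>
 *	#include <linux/kprobes.h>
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("probe hit at %p, eip=0x%lx\n", p->addr, regs->eip);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp;
 *
 *	static int __init my_init(void)
 *	{
 *		my_kp.addr = (kprobe_opcode_t *) my_probe_address;
 *		my_kp.pre_handler = my_pre;
 *		return register_kprobe(&my_kp);
 *	}
 *
 * Returning 0 from the pre-handler lets the original instruction be
 * single-stepped (or boosted) as usual; my_probe_address stands for an
 * address obtained e.g. from kallsyms or System.map.
 */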
static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}

static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}

static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				      struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
		= (regs->eflags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->opcode))
		kcb->kprobe_saved_eflags &= ~IF_MASK;
}
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->eflags |= TF_MASK;
	regs->eflags &= ~IF_MASK;
	/* single-step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->eip = (unsigned long)p->addr;
	else
		regs->eip = (unsigned long)p->ainsn.insn;
}
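/*
 * Worked example (illustrative): with eflags == 0x246 on entry (IF
 * set), setting TF (0x100) and clearing IF (0x200) yields 0x146, so
 * the CPU raises a debug trap after exactly one instruction and takes
 * no interrupts in between.
 */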
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)&regs->esp;
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *) *sara;

		/* Replace the return addr with trampoline addr */
		*sara = (unsigned long) &kretprobe_trampoline;

		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}
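/*
 * Example usage (a minimal sketch, not part of this file; all "my_*"
 * names are hypothetical).  A return handler sees the register state
 * at the probed function's return, so on i386 regs->eax is its return
 * value:
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		if ((long)regs->eax < 0)
 *			printk("probed function failed: %ld\n",
 *			       (long)regs->eax);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret_handler,
 *		.maxactive = 4,
 *	};
 *
 * After my_rp.kp.addr is set, register_kretprobe(&my_rp) arms the
 * entry probe; this function then swaps each live return address for
 * kretprobe_trampoline.  maxactive bounds how many instances may be
 * outstanding at once; excess hits are counted in rp->nmissed.
 */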
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;
#ifdef CONFIG_PREEMPT
	unsigned pre_preempt_count = preempt_count();
#endif /* CONFIG_PREEMPT */

	addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->eflags &= ~TF_MASK;
				regs->eflags |= kcb->kprobe_saved_eflags;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (regs->eflags & VM_MASK) {
				/* We are in virtual-8086 mode. Return 0 */
				goto no_kprobe;
			}
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it; no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				regs->eip -= sizeof(kprobe_opcode_t);
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (regs->eflags & VM_MASK) {
			/* We are in virtual-8086 mode. Return 0 */
			goto no_kprobe;
		}

		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->eip -= sizeof(kprobe_opcode_t);
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable == 1 &&
#ifdef CONFIG_PREEMPT
	    !(pre_preempt_count) && /*
				     * This enables booster when the direct
				     * execution path isn't preempted.
				     */
#endif /* CONFIG_PREEMPT */
	    !p->post_handler && !p->break_handler) {
		/* Boost up -- we can execute copied instructions directly */
		reset_current_kprobe();
		regs->eip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return 1;
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here. When a retprobed function returns, this probe is hit and
 * trampoline_handler() runs, calling the kretprobe's handler.
 */
void __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile ( ".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
			"	pushf\n"
			/* skip cs, eip, orig_eax, es, ds */
			"	subl $20, %esp\n"
			"	pushl %eax\n"
			"	pushl %ebp\n"
			"	pushl %edi\n"
			"	pushl %esi\n"
			"	pushl %edx\n"
			"	pushl %ecx\n"
			"	pushl %ebx\n"
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* move eflags to cs */
			"	movl 48(%esp), %edx\n"
			"	movl %edx, 44(%esp)\n"
			/* save true return address on eflags */
			"	movl %eax, 48(%esp)\n"
			"	popl %ebx\n"
			"	popl %ecx\n"
			"	popl %edx\n"
			"	popl %esi\n"
			"	popl %edi\n"
			"	popl %ebp\n"
			"	popl %eax\n"
			/* skip eip, orig_eax, es, ds */
			"	addl $16, %esp\n"
			"	popf\n"
			"	ret\n");
}
/*
 * Called from kretprobe_trampoline
 */
fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));

	spin_unlock_irqrestore(&kretprobe_lock, flags);

	return (void *)orig_ret_address;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new eip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * This function also checks instruction size for preparing direct execution.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)&regs->esp;
	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
	unsigned long orig_eip = (unsigned long)p->addr;

	regs->eflags &= ~TF_MASK;
	switch (p->ainsn.insn[0]) {
	case 0x9c:		/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_eflags;
		break;
	case 0xc3:		/* ret/lret */
	case 0xcb:
	case 0xc2:
	case 0xca:
	case 0xea:		/* jmp absolute -- eip is correct */
		/* eip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:		/* call relative - Fix return addr */
		*tos = orig_eip + (*tos - copy_eip);
		break;
	case 0xff:
		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
			/* call absolute, indirect */
			/*
			 * Fix return addr; eip is correct.
			 * But this is not boostable
			 */
			*tos = orig_eip + (*tos - copy_eip);
			goto no_change;
		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* eip is correct. And this is boostable */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->eip > copy_eip) &&
		    (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
			/*
			 * The copied instruction can be executed directly
			 * if it jumps back to the correct address.
			 */
			set_jmp_op((void *)regs->eip,
				   (void *)orig_eip + (regs->eip - copy_eip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->eip = orig_eip + (regs->eip - copy_eip);

no_change:
	return;
}
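/*
 * Worked example (illustrative addresses): a 5-byte "call rel32" at
 * orig_eip 0xc0101000 is copied to copy_eip 0xc02f0000 and
 * single-stepped there, pushing copy_eip + 5 = 0xc02f0005 as the
 * return address.  The 0xe8 case above rewrites it to
 * orig_eip + (0xc02f0005 - copy_eip) = 0xc0101005, and the final
 * statement maps regs->eip back into the original code the same way.
 */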
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->eflags |= kcb->kprobe_saved_eflags;

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is single-stepping across a probe point, eflags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->eflags & TF_MASK)
		return 0;

	return 1;
}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the eip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->eip = (unsigned long)cur->addr;
		regs->eflags |= kcb->kprobe_old_eflags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we could also use the npre/npostfault counts to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if a handler tries to access user space, e.g. by
		 * copy_from_user() or get_user(). Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
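/*
 * Note (illustrative sketch of the generic-layer hookup, which lives
 * in kernel/kprobes.c rather than here): this wrapper is wired into
 * the die-notifier chain at init time, roughly:
 *
 *	static struct notifier_block kprobe_exceptions_nb = {
 *		.notifier_call = kprobe_exceptions_notify,
 *		.priority = 0x7fffffff,
 *	};
 *
 *	register_die_notifier(&kprobe_exceptions_nb);
 *
 * The high priority ensures kprobes sees int3/debug traps before
 * other debuggers on the chain.
 */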
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_esp = &regs->esp;
	addr = (unsigned long)(kcb->jprobe_saved_esp);

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->eflags &= ~IF_MASK;
	regs->eip = (unsigned long)(jp->entry);
	return 1;
}
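/*
 * Example usage (a minimal sketch, not part of this file; the probed
 * function and all "my_*" names are hypothetical).  A jprobe entry
 * point must mirror the probed function's signature and finish with
 * jprobe_return():
 *
 *	static int my_entry(int fd, char __user *buf, size_t count)
 *	{
 *		printk("fd=%d count=%zu\n", fd, count);
 *		jprobe_return();
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = (kprobe_opcode_t *) my_entry,
 *	};
 *
 * After my_jp.kp.addr is set to the probed function's address,
 * register_jprobe(&my_jp) arms it; setjmp_pre_handler() above then
 * redirects eip to my_entry with the original stack intact, which is
 * why the arguments are readable there.  The trailing return is never
 * reached.
 */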
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("       xchgl   %%ebx,%%esp     \n"
		      "       int3			\n"
		      "       .globl jprobe_return_end	\n"
		      "       jprobe_return_end:	\n"
		      "       nop			\n"
		      ::"b" (kcb->jprobe_saved_esp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->eip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if (&regs->esp != kcb->jprobe_saved_esp) {
			struct pt_regs *saved_regs =
			    container_of(kcb->jprobe_saved_esp,
					 struct pt_regs, esp);
			printk("current esp %p does not match saved esp %p\n",
			       &regs->esp, kcb->jprobe_saved_esp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
int __init arch_init_kprobes(void)
{
	return 0;
}