/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <linux/module.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Make sure the probe isn't going on a difficult instruction */
	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
		return -EINVAL;
	if ((unsigned long)p->addr & 0x01) {
		printk("Attempt to register kprobe at an unaligned address\n");
		return -EINVAL;
	}
	/* Use the get_insn_slot() facility for correctness */
	if (!(p->ainsn.insn = get_insn_slot()))
		return -ENOMEM;
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	get_instruction_type(&p->ainsn);
	p->opcode = *p->addr;
	return 0;
}
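/*
 * Illustrative only (not built): a minimal sketch of what a kprobes user
 * looks like from the other side of this interface.  register_kprobe()
 * ends up in arch_prepare_kprobe() above, which rejects odd addresses and
 * prohibited opcodes before copying the instruction to a slot.  The
 * target symbol and handler body below are hypothetical.
 */
#if 0
static int example_pre(struct kprobe *kp, struct pt_regs *regs)
{
	/* Runs just before the probed instruction executes */
	printk("kprobe hit at %p\n", kp->addr);
	return 0;	/* 0: continue with single-stepping the copy */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* hypothetical target */
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);	/* < 0 on failure */
}
#endif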
int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
{
	switch (*(__u8 *) instruction) {
	case 0x0c:	/* bassm */
		return -EINVAL;
	}
	switch (*(__u16 *) instruction) {
	case 0xb25a:	/* bsa	*/
	case 0xb240:	/* bakr	*/
	case 0xb258:	/* bsg	*/
		return -EINVAL;
	}
	return 0;
}
void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
{
	/* default fixup method */
	ainsn->fixup = FIXUP_PSW_NORMAL;
	/* save r1 operand */
	ainsn->reg = (*ainsn->insn & 0xf0) >> 4;
	/* save the instruction length (pop 5-5) in bytes */
	switch (*(__u8 *) (ainsn->insn) >> 6) {
	case 0:
		ainsn->ilen = 2;
		break;
	case 1:
	case 2:
		ainsn->ilen = 4;
		break;
	case 3:
		ainsn->ilen = 6;
		break;
	}
	switch (*(__u8 *) ainsn->insn) {
	case 0x05:	/* balr	*/
	case 0x0d:	/* basr */
		ainsn->fixup = FIXUP_RETURN_REGISTER;
		/* if r2 = 0, no branch will be taken */
		if ((*ainsn->insn & 0x0f) == 0)
			ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x06:	/* bctr	*/
	case 0x07:	/* bcr	*/
		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x45:	/* bal	*/
	case 0x4d:	/* bas	*/
		ainsn->fixup = FIXUP_RETURN_REGISTER;
		break;
	case 0x47:	/* bc	*/
	case 0x46:	/* bct	*/
	case 0x86:	/* bxh	*/
	case 0x87:	/* bxle	*/
		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x82:	/* lpsw	*/
		ainsn->fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xb2:	/* lpswe */
		if (*(((__u8 *) ainsn->insn) + 1) == 0xb2)
			ainsn->fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xa7:	/* bras	*/
		if ((*ainsn->insn & 0x0f) == 0x05)
			ainsn->fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xc0:
		if ((*ainsn->insn & 0x0f) == 0x00	/* larl  */
		    || (*ainsn->insn & 0x0f) == 0x05)	/* brasl */
			ainsn->fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xeb:
		if (*(((__u8 *) ainsn->insn) + 5) == 0x44 ||	/* bxhg  */
		    *(((__u8 *) ainsn->insn) + 5) == 0x45)	/* bxleg */
			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0xe3:	/* bctg	*/
		if (*(((__u8 *) ainsn->insn) + 5) == 0x46)
			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	}
}
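/*
 * Worked example for the decoding above (hand-checked, for illustration):
 * for "basr %r14,%r9", encoded 0x0de9, the top two bits of the first byte
 * (0x0d >> 6 == 0) give ilen = 2; the first byte selects
 * FIXUP_RETURN_REGISTER with reg = (0xe9 & 0xf0) >> 4 = 14, so
 * resume_execution() rewrites %r14; and since the r2 field
 * (0xe9 & 0x0f = 9) is non-zero the branch is taken, so
 * FIXUP_BRANCH_NOT_TAKEN is not set.  For "basr %r14,%r0" (0x0de0) it
 * would be set, because basr with r2 = 0 does not branch.
 */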
static int __kprobes swap_instruction(void *aref)
{
	struct ins_replace_args *args = aref;
	u32 *addr, instr;
	int err = -EFAULT;

	/*
	 * Text segment is read-only, hence we use stura to bypass dynamic
	 * address translation to exchange the instruction. Since stura
	 * always operates on four bytes, but we only want to exchange two
	 * bytes, do some calculations to get things right. In addition we
	 * shall not cross any page boundaries (vmalloc area!) when writing
	 * the new instruction.
	 */
	addr = (u32 *)((unsigned long)args->ptr & ~3UL);
	if ((unsigned long)args->ptr & 2)
		instr = ((*addr) & 0xffff0000) | args->new;
	else
		instr = ((*addr) & 0x0000ffff) | args->new << 16;

	asm volatile(
		"	lra	%1,0(%1)\n"
		"0:	stura	%2,%1\n"
		"1:	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b,2b)
		: "+d" (err)
		: "a" (addr), "d" (instr)
		: "memory", "cc");
	return err;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.old = p->opcode;
	args.new = BREAKPOINT_INSTRUCTION;
	/* swap on all cpus at once so every cpu sees a consistent text image */
	kcb->kprobe_status = KPROBE_SWAP_INST;
	stop_machine_run(swap_instruction, &args, NR_CPUS);
	kcb->kprobe_status = status;
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.old = BREAKPOINT_INSTRUCTION;
	args.new = p->opcode;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	stop_machine_run(swap_instruction, &args, NR_CPUS);
	kcb->kprobe_status = status;
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, 0);
	mutex_unlock(&kprobe_mutex);
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	per_cr_bits kprobe_per_regs[1];

	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
	regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;

	/* Set up the per control reg info, will pass to lctl */
	kprobe_per_regs[0].em_instruction_fetch = 1;
	kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
	kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;

	/* Set the PER control regs, turns on single step for this address */
	__ctl_load(kprobe_per_regs, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
}
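/*
 * PER background for the above (summary, not authoritative): control
 * registers 9-11 hold the PER event mask, starting address and ending
 * address.  With em_instruction_fetch set and the range covering just the
 * instruction slot, the CPU raises a PER program interrupt once the copied
 * instruction at p->ainsn.insn has executed, which arrives here as the
 * DIE_SSTEP notification handled by post_kprobe_handler() below.
 */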
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
	memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
	       sizeof(kcb->kprobe_saved_ctl));
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
	memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
	       sizeof(kcb->kprobe_saved_ctl));
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	/* Save the interrupt and per flags */
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
	/* Save the control regs that govern PER */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
}
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
		/* Replace the return addr with trampoline addr */
		regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
		add_rp_inst(ri);
	} else
		rp->nmissed++;
}
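/*
 * Illustrative only (not built): a hypothetical kretprobe user.  The
 * handler runs from trampoline_probe_handler() below, after %r14 has been
 * redirected to kretprobe_trampoline; on s390 the probed function's return
 * value is in %r2, i.e. regs->gprs[2].
 */
#if 0
static int example_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk("probed function returned %lx\n", regs->gprs[2]);
	return 0;
}

static struct kretprobe example_rp = {
	.handler	= example_ret,
	.maxactive	= 4,	/* concurrent activations to track */
};
/* set example_rp.kp.addr, then call register_kretprobe(&example_rp) */
#endif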
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned long *addr = (unsigned long *)
		((regs->psw.addr & PSW_ADDR_INSN) - 2);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->psw.mask &= ~PSW_MASK_PER;
				regs->psw.mask |= kcb->kprobe_saved_imask;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/*
 * Function return probe trampoline:
 *	- arch_init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}
/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}
	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	regs->psw.addr &= PSW_ADDR_INSN;

	if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
		regs->psw.addr = (unsigned long)p->addr +
				 ((unsigned long)regs->psw.addr -
				  (unsigned long)p->ainsn.insn);

	if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
		if ((unsigned long)regs->psw.addr -
		    (unsigned long)p->ainsn.insn == p->ainsn.ilen)
			regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;

	if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
		regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
					    (regs->gprs[p->ainsn.reg] -
					     (unsigned long)p->ainsn.insn))
					   | PSW_ADDR_AMODE;

	regs->psw.addr |= PSW_ADDR_AMODE;
	/* turn off PER mode */
	regs->psw.mask &= ~PSW_MASK_PER;
	/* Restore the original per control regs */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask |= kcb->kprobe_saved_imask;
}
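/*
 * Worked example for FIXUP_PSW_NORMAL above (illustration only): suppose
 * the probe is at p->addr = 0x20000 and the copied instruction slot is at
 * p->ainsn.insn = 0x30000.  After single-stepping a 4-byte instruction the
 * psw points to 0x30004; the fixup rebases it to
 * p->addr + (0x30004 - 0x30000) = 0x20004, i.e. the instruction after the
 * probe point, exactly as if the original instruction had run in place.
 */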
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();
	/*
	 * if somebody else is singlestepping across a probe point, psw mask
	 * will have PER set, in which case, continue the remaining processing
	 * of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}
static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the psw back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
		regs->psw.mask &= ~PSW_MASK_PER;
		regs->psw.mask |= kcb->kprobe_saved_imask;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;

	/* r14 is the function return address */
	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
	/* r15 is the stack pointer */
	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
	addr = (unsigned long)kcb->jprobe_saved_r15;

	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
	       MIN_STACK_SIZE(addr));
	return 1;
}
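/*
 * Illustrative only (not built): a hypothetical jprobe user.  The entry
 * function must mirror the probed function's signature so the register
 * and stack image saved above lines up with its arguments, and it must
 * end in jprobe_return(), which triggers the breakpoint that
 * longjmp_break_handler() uses to restore the saved context.
 */
#if 0
static long example_jentry(unsigned long arg0, unsigned long arg1)
{
	printk("jprobe args: %lx %lx\n", arg0, arg1);
	jprobe_return();	/* never returns normally */
	return 0;		/* unreachable; keeps the compiler happy */
}

static struct jprobe example_jp = {
	.entry	= JPROBE_ENTRY(example_jentry),
};
/* set example_jp.kp.addr to the probed function, then register_jprobe() */
#endif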
void __kprobes jprobe_return(void)
{
	/* 0x0002 is BREAKPOINT_INSTRUCTION: trap into longjmp_break_handler() */
	asm volatile(".word 0x0002");
}

void __kprobes jprobe_return_end(void)
{
	asm volatile("bcr 0,0");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
	       MIN_STACK_SIZE(stack_addr));
	preempt_enable_no_resched();
	return 1;
}
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}