/*
 *  Kernel Probes (KProbes)
 *  arch/i386/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/uaccess.h>
void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/* insert a 5-byte relative jmp at "from", targeting "to" */
static __always_inline void set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		long raddr;
	} __attribute__((packed)) *jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (long)(to) - ((long)(from) + 5);
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
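
/*
 * The struct above encodes one 0xe9 opcode byte followed by a 32-bit
 * displacement.  The displacement is taken from the end of the 5-byte
 * instruction, hence the "(long)(from) + 5" term: a jmp written at
 * 0xc0100000 that targets 0xc0100010 stores a raddr of 0x0000000b.
 */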
/*
 * returns non-zero if opcodes can be boosted.
 */
static __always_inline int can_boost(kprobe_opcode_t *opcodes)
{
#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)		      \
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 */
	static const unsigned long twobyte_is_boostable[256 / 32] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 0,0,1,1,0,0,1,0,1,1,0,0,0,0,0,0)| /* 00 */
		W(0x10, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 10 */
		W(0x20, 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0)| /* 20 */
		W(0x30, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 30 */
		W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 40 */
		W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 50 */
		W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1)| /* 60 */
		W(0x70, 0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1), /* 70 */
		W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 80 */
		W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* 90 */
		W(0xa0, 1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1)| /* a0 */
		W(0xb0, 1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1), /* b0 */
		W(0xc0, 1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,1)| /* c0 */
		W(0xd0, 0,1,1,1,0,1,0,0,1,1,0,1,1,1,0,1), /* d0 */
		W(0xe0, 0,1,1,0,0,1,0,0,1,1,0,1,1,1,0,1)| /* e0 */
		W(0xf0, 0,1,1,1,0,1,0,0,1,1,1,0,1,1,1,0)  /* f0 */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
#undef W
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;
retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes, twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags can be boosted */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* can't boost CS override and call */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
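
/*
 * Summary of the table and checks above: a "boostable" instruction is
 * one whose copy in the insn slot can simply be followed by a jmp back
 * into the original instruction stream (see set_jmp_op()), letting
 * kprobe_handler() skip the single-step debug trap on later hits.
 * Anything that transfers control itself, raises a software interrupt,
 * or needs address-dependent fix-ups is excluded.
 */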
/*
 * returns non-zero if opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
{
	switch (opcode) {
	case 0xfa:	/* cli */
	case 0xfb:	/* sti */
	case 0xcf:	/* iret/iretd */
	case 0x9d:	/* popf/popfd */
		return 1;
	}
	return 0;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on i386. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	if (can_boost(p->addr)) {
		p->ainsn.boostable = 0;
	} else {
		p->ainsn.boostable = -1;
	}
	return 0;
}
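
/*
 * ainsn.boostable tracks the boost state of the copied instruction:
 *   -1 : not boostable (or later found unsuitable),
 *    0 : boost candidate, but the return jmp has not been written yet,
 *    1 : boosted -- resume_execution() has planted the jmp with
 *        set_jmp_op(), so the copy can run without a debug trap.
 */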
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
	mutex_unlock(&kprobe_mutex);
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
		= (regs->eflags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->opcode))
		kcb->kprobe_saved_eflags &= ~IF_MASK;
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->eflags |= TF_MASK;
	regs->eflags &= ~IF_MASK;
	/* single-step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->eip = (unsigned long)p->addr;
	else
		regs->eip = (unsigned long)p->ainsn.insn;
}
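
/*
 * TF_MASK arms the x86 trap flag, so the CPU delivers a debug exception
 * (trap1) after executing exactly one instruction; IF_MASK is cleared
 * so no interrupt can arrive in the middle of the step.  The debug trap
 * lands in post_kprobe_handler(), which restores both flags.
 */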
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)&regs->esp;

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
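
/*
 * The &regs->esp idiom works because, on i386, a trap from kernel mode
 * does not push ss/esp: struct pt_regs ends exactly where the pre-trap
 * stack begins, so &regs->esp is the probe-time stack pointer.  At
 * function entry that slot holds the caller's return address, which is
 * what gets swapped for the trampoline here.
 */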
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->eflags &= ~TF_MASK;
				regs->eflags |= kcb->kprobe_saved_eflags;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobes variables and
			 * just single-step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it; no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				regs->eip -= sizeof(kprobe_opcode_t);
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}
	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->eip -= sizeof(kprobe_opcode_t);
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		reset_current_kprobe();
		regs->eip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return 1;
	}
#endif
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
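
/*
 * Minimal usage sketch (illustrative only -- the probed symbol and the
 * "my_*" names are examples, not part of this file).  A pre_handler
 * that returns 0 asks kprobe_handler() above to go on and single-step
 * (or boost) the original instruction:
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk("probe hit at %p, eip=%lx\n", kp->addr, regs->eip);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	register_kprobe(&my_kp);
 */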
/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here.  When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 */
void __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile ( ".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
			"	pushf\n"
			/* skip cs, eip, orig_eax */
			"	subl $12, %esp\n"
			"	pushl %fs\n	pushl %ds\n	pushl %es\n"
			"	pushl %eax\n	pushl %ebp\n	pushl %edi\n"
			"	pushl %esi\n	pushl %edx\n	pushl %ecx\n"
			"	pushl %ebx\n"
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* move eflags to cs */
			"	movl 52(%esp), %edx\n"
			"	movl %edx, 48(%esp)\n"
			/* save true return address on eflags */
			"	movl %eax, 52(%esp)\n"
			"	popl %ebx\n	popl %ecx\n	popl %edx\n"
			"	popl %esi\n	popl %edi\n	popl %ebp\n"
			"	popl %eax\n"
			/* skip eip, orig_eax, es, ds, fs */
			"	addl $20, %esp\n"
			"	popf\n"
			"	ret\n");
}
/*
 * Called from kretprobe_trampoline
 */
fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);
	/* fixup registers */
	regs->xcs = __KERNEL_CS | get_kernel_rpl();
	regs->eip = trampoline_address;
	regs->orig_eax = 0xffffffff;
	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
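
/*
 * Usage sketch (illustrative only; names are examples).  The handler
 * runs from trampoline_handler() above with the fixed-up pt_regs, so
 * regs->eax holds the probed function's return value on i386:
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk("returned %lx to %p\n", regs->eax, ri->ret_addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "do_fork",
 *		.handler	= my_ret_handler,
 *		.maxactive	= 20,
 *	};
 *
 *	register_kretprobe(&my_rp);
 */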
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new eip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * This function also checks instruction size for preparing direct execution.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)&regs->esp;
	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
	unsigned long orig_eip = (unsigned long)p->addr;

	regs->eflags &= ~TF_MASK;
	switch (p->ainsn.insn[0]) {
	case 0x9c:	/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_eflags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- eip is correct */
		/* eip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_eip + (*tos - copy_eip);
		break;
	case 0x9a:	/* call absolute - Fix return addr; eip is correct */
		*tos = orig_eip + (*tos - copy_eip);
		goto no_change;
	case 0xff:
		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; eip is correct.
			 * But this is not boostable
			 */
			*tos = orig_eip + (*tos - copy_eip);
			goto no_change;
		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* eip is correct. And this is boostable */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->eip > copy_eip) &&
		    (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
			/*
			 * The copied instruction can run directly,
			 * provided we plant a jmp back to the
			 * corresponding original address.
			 */
			set_jmp_op((void *)regs->eip,
				   (void *)orig_eip + (regs->eip - copy_eip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->eip = orig_eip + (regs->eip - copy_eip);

no_change:
	return;
}
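
/*
 * Note on the boostable == 0 case above: when the copied instruction
 * fell straight through (no control transfer), set_jmp_op() plants a
 * 5-byte relative jmp right after the copy, pointing back into the
 * original instruction stream.  From then on the probe is "boosted":
 * hits run copy-plus-jmp directly and skip the trap1 round trip.
 */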
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->eflags |= kcb->kprobe_saved_eflags;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is single-stepping across a probe point, eflags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->eflags & TF_MASK)
		return 0;

	return 1;
}
static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault.  We reset the current
		 * kprobe and the eip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->eip = (unsigned long)cur->addr;
		regs->eflags |= kcb->kprobe_old_eflags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault; this could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc.  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * fixup_exception() could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_esp = &regs->esp;
	addr = (unsigned long)(kcb->jprobe_saved_esp);

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->eflags &= ~IF_MASK;
	regs->eip = (unsigned long)(jp->entry);
	return 1;
}
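
/*
 * jprobe_return() must be called at the end of every jprobe entry
 * function.  setjmp_pre_handler() above redirected eip to jp->entry
 * while leaving the probed function's stack in place; the xchgl below
 * switches back to the saved stack pointer (passed in via %ebx) and
 * the int3 re-enters kprobes, where longjmp_break_handler() restores
 * the saved registers and stack bytes.
 */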
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("       xchgl   %%ebx,%%esp     \n"
		      "       int3			\n"
		      "       .globl jprobe_return_end	\n"
		      "       jprobe_return_end:	\n"
		      "       nop			\n"
		      ::"b" (kcb->jprobe_saved_esp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->eip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if (&regs->esp != kcb->jprobe_saved_esp) {
			struct pt_regs *saved_regs =
			    container_of(kcb->jprobe_saved_esp,
					 struct pt_regs, esp);
			printk("current esp %p does not match saved esp %p\n",
			       &regs->esp, kcb->jprobe_saved_esp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
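
/*
 * Usage sketch for the jprobe path (illustrative only; "foo" and the
 * "my_*" names are examples).  The entry function mirrors the probed
 * function's prototype so it can read the arguments, and must finish
 * with jprobe_return():
 *
 *	long foo(int a, int b);			(probed kernel function)
 *
 *	static long my_entry(int a, int b)
 *	{
 *		printk("foo(%d, %d)\n", a, b);
 *		jprobe_return();
 *		return 0;			(never reached)
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= JPROBE_ENTRY(my_entry),
 *		.kp.symbol_name	= "foo",
 *	};
 *
 *	register_jprobe(&my_jp);
 */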
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}