/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *              Probes initial implementation (includes suggestions from
 *              Rusty Russell).
 * 2004-Aug     Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *              hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
 * 2004-Sep     Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *              exceptions notifier to be first on the priority list.
 * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *              <prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
        addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);             /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);        /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE  (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
        struct hlist_node hlist;
        kprobe_opcode_t *insns;         /* Page of instruction slots */
        char slot_used[INSNS_PER_PAGE];
        int nused;
        int ngarbage;
};
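
/*
 * Layout sketch: kip->insns is a single executable page carved into
 * INSNS_PER_PAGE fixed-size slots of MAX_INSN_SIZE opcodes each, so
 * slot i starts at kip->insns + i * MAX_INSN_SIZE and slot_used[i]
 * tracks that slot's state.
 */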

enum kprobe_slot_state {
        SLOT_CLEAN = 0,
        SLOT_DIRTY = 1,
        SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
        int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
        ret = freeze_processes();
        if (ret == 0) {
                struct task_struct *p, *q;
                do_each_thread(p, q) {
                        if (p != current && p->state == TASK_RUNNING &&
                            p->pid != 0) {
                                printk("Check failed: %s is running\n", p->comm);
                                ret = -1;
                                goto loop_end;
                        }
                } while_each_thread(p, q);
        }
loop_end:
        thaw_processes();
#else
        synchronize_sched();
#endif
        return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos;

 retry:
        hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
                if (kip->nused < INSNS_PER_PAGE) {
                        int i;
                        for (i = 0; i < INSNS_PER_PAGE; i++) {
                                if (kip->slot_used[i] == SLOT_CLEAN) {
                                        kip->slot_used[i] = SLOT_USED;
                                        kip->nused++;
                                        return kip->insns + (i * MAX_INSN_SIZE);
                                }
                        }
                        /* Surprise!  No unused slots.  Fix kip->nused. */
                        kip->nused = INSNS_PER_PAGE;
                }
        }

        /* If there are any garbage slots, collect them and try again. */
        if (kprobe_garbage_slots && collect_garbage_slots() == 0)
                goto retry;

        /* All out of space.  Need to allocate a new page. Use slot 0. */
        kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
        if (!kip)
                return NULL;

        /*
         * Use module_alloc so this page is within +/- 2GB of where the
         * kernel image and loaded module images reside. This is required
         * so x86_64 can correctly handle the %rip-relative fixups.
         */
        kip->insns = module_alloc(PAGE_SIZE);
        if (!kip->insns) {
                kfree(kip);
                return NULL;
        }
        INIT_HLIST_NODE(&kip->hlist);
        hlist_add_head(&kip->hlist, &kprobe_insn_pages);
        memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
        kip->slot_used[0] = SLOT_USED;
        kip->nused = 1;
        kip->ngarbage = 0;
        return kip->insns;
}
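
/*
 * Typical arch-side pairing, shown only as a sketch (the real code lives
 * in arch/<arch>/kernel/kprobes.c and differs per architecture):
 * arch_prepare_kprobe() grabs a slot and copies the original opcodes into
 * it, and arch_remove_kprobe() hands the slot back:
 *
 *      p->ainsn.insn = get_insn_slot();
 *      if (!p->ainsn.insn)
 *              return -ENOMEM;
 *      memcpy(p->ainsn.insn, p->addr,
 *             MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *      ...
 *      free_insn_slot(p->ainsn.insn, 0);       (from arch_remove_kprobe)
 */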

/* Return 1 if all garbage slots on the page were collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
        kip->slot_used[idx] = SLOT_CLEAN;
        kip->nused--;
        if (kip->nused == 0) {
                /*
                 * Page is no longer in use.  Free it unless
                 * it's the last one.  We keep the last one
                 * so as not to have to set it up again the
                 * next time somebody inserts a probe.
                 */
                hlist_del(&kip->hlist);
                if (hlist_empty(&kprobe_insn_pages)) {
                        INIT_HLIST_NODE(&kip->hlist);
                        hlist_add_head(&kip->hlist,
                                       &kprobe_insn_pages);
                } else {
                        module_free(NULL, kip->insns);
                        kfree(kip);
                }
                return 1;
        }
        return 0;
}

static int __kprobes collect_garbage_slots(void)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos, *next;

        /* Ensure no one is preempted while touching the garbage slots */
        if (check_safety() != 0)
                return -EAGAIN;

        hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
                int i;
                if (kip->ngarbage == 0)
                        continue;
                kip->ngarbage = 0;      /* we will collect all garbage slots */
                for (i = 0; i < INSNS_PER_PAGE; i++) {
                        if (kip->slot_used[i] == SLOT_DIRTY &&
                            collect_one_slot(kip, i))
                                break;
                }
        }
        kprobe_garbage_slots = 0;
        return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos;

        hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
                if (kip->insns <= slot &&
                    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
                        int i = (slot - kip->insns) / MAX_INSN_SIZE;
                        if (dirty) {
                                kip->slot_used[i] = SLOT_DIRTY;
                                kip->ngarbage++;
                        } else {
                                collect_one_slot(kip, i);
                        }
                        break;
                }
        }

        if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
                collect_garbage_slots();
}
#endif

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
        __get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
        __get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *      - under the kprobe_mutex - during kprobe_[un]register()
 *                              OR
 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr)
                        return p;
        }
        return NULL;
}
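
/*
 * Caller sketch (illustrative only): arch breakpoint handlers do the
 * lookup with preemption disabled, roughly
 *
 *      preempt_disable();
 *      p = get_kprobe((void *)instruction_pointer(regs));
 *      if (p)
 *              ... dispatch to p's handlers ...
 */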

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler) {
                        set_kprobe_instance(kp);
                        if (kp->pre_handler(kp, regs))
                                return 1;
                }
                reset_kprobe_instance();
        }
        return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
                                        unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler) {
                        set_kprobe_instance(kp);
                        kp->post_handler(kp, regs, flags);
                        reset_kprobe_instance();
                }
        }
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                        int trapnr)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);

        /*
         * If we faulted "during" the execution of a user-specified
         * probe handler, invoke just that probe's fault handler
         */
        if (cur && cur->fault_handler) {
                if (cur->fault_handler(cur, regs, trapnr))
                        return 1;
        }
        return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);
        int ret = 0;

        if (cur && cur->break_handler) {
                if (cur->break_handler(cur, regs))
                        ret = 1;
        }
        reset_kprobe_instance();
        return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
        struct kprobe *kp;

        if (p->pre_handler != aggr_pre_handler) {
                p->nmissed++;
        } else {
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->nmissed++;
        }
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
                                struct hlist_head *head)
{
        /* remove rp inst off the kretprobe_inst_table */
        hlist_del(&ri->hlist);
        if (ri->rp) {
                /* remove rp inst off the used list */
                hlist_del(&ri->uflist);
                /* put rp inst back onto the free list */
                INIT_HLIST_NODE(&ri->uflist);
                hlist_add_head(&ri->uflist, &ri->rp->free_instances);
        } else
                /* Unregistering */
                hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
        return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long flags = 0;

        INIT_HLIST_HEAD(&empty_rp);
        spin_lock_irqsave(&kretprobe_lock, flags);
        head = kretprobe_inst_table_head(tk);
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task == tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        spin_unlock_irqrestore(&kretprobe_lock, flags);

        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}

static inline void free_rp_inst(struct kretprobe *rp)
{
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;

        hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
                hlist_del(&ri->uflist);
                kfree(ri);
        }
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
        memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        if (p->break_handler) {
                if (old_p->break_handler)
                        return -EEXIST;
                list_add_tail_rcu(&p->list, &old_p->list);
                old_p->break_handler = aggr_break_handler;
        } else
                list_add_rcu(&p->list, &old_p->list);
        if (p->post_handler && !old_p->post_handler)
                old_p->post_handler = aggr_post_handler;
        return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
        copy_kprobe(p, ap);
        flush_insn_slot(ap);
        ap->addr = p->addr;
        ap->pre_handler = aggr_pre_handler;
        ap->fault_handler = aggr_fault_handler;
        if (p->post_handler)
                ap->post_handler = aggr_post_handler;
        if (p->break_handler)
                ap->break_handler = aggr_break_handler;

        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                                          struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap;

        if (old_p->pre_handler == aggr_pre_handler) {
                copy_kprobe(old_p, p);
                ret = add_new_kprobe(old_p, p);
        } else {
                ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
                if (!ap)
                        return -ENOMEM;
                add_aggr_kprobe(ap, old_p);
                copy_kprobe(ap, p);
                ret = add_new_kprobe(ap, p);
        }
        return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
        if (addr >= (unsigned long)__kprobes_text_start &&
            addr < (unsigned long)__kprobes_text_end)
                return -EINVAL;
        return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
        unsigned long called_from)
{
        int ret = 0;
        struct kprobe *old_p;
        struct module *probed_mod;

        /*
         * If we have a symbol_name argument, look it up and add the
         * offset to the address.  That way the addr field can either
         * be global or relative to a symbol.
         */
        if (p->symbol_name) {
                if (p->addr)
                        return -EINVAL;
                kprobe_lookup_name(p->symbol_name, p->addr);
        }

        if (!p->addr)
                return -EINVAL;
        p->addr = (kprobe_opcode_t *)((char *)p->addr + p->offset);

        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr))
                return -EINVAL;

        p->mod_refcounted = 0;

        /*
         * Check if we are probing a module.
         */
        probed_mod = module_text_address((unsigned long) p->addr);
        if (probed_mod) {
                struct module *calling_mod = module_text_address(called_from);
                /*
                 * We must allow modules to probe themselves and in this case
                 * avoid incrementing the module refcount, so as to allow
                 * unloading of self-probing modules.
                 */
                if (calling_mod && calling_mod != probed_mod) {
                        if (unlikely(!try_module_get(probed_mod)))
                                return -EINVAL;
                        p->mod_refcounted = 1;
                } else
                        probed_mod = NULL;
        }

        p->nmissed = 0;
        mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
        if (old_p) {
                ret = register_aggr_kprobe(old_p, p);
                goto out;
        }

        ret = arch_prepare_kprobe(p);
        if (ret)
                goto out;

        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist,
                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

        if (kprobe_enabled)
                arch_arm_kprobe(p);

out:
        mutex_unlock(&kprobe_mutex);

        if (ret && probed_mod)
                module_put(probed_mod);
        return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
        return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
}
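
/*
 * Registration sketch for probe authors (illustrative; the handler and
 * the probed symbol below are placeholders, not part of this file):
 *
 *      static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *      {
 *              printk(KERN_INFO "kprobe hit at %p\n", kp->addr);
 *              return 0;       (0 = let the probed instruction run)
 *      }
 *
 *      static struct kprobe my_kp = {
 *              .symbol_name    = "do_fork",
 *              .pre_handler    = my_pre,
 *      };
 *
 *      ret = register_kprobe(&my_kp);  (pair with unregister_kprobe)
 */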

void __kprobes unregister_kprobe(struct kprobe *p)
{
        struct module *mod;
        struct kprobe *old_p, *list_p;
        int cleanup_p;

        mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
        if (unlikely(!old_p)) {
                mutex_unlock(&kprobe_mutex);
                return;
        }
        if (p != old_p) {
                list_for_each_entry_rcu(list_p, &old_p->list, list)
                        if (list_p == p)
                        /* kprobe p is a valid probe */
                                goto valid_p;
                mutex_unlock(&kprobe_mutex);
                return;
        }
valid_p:
        if (old_p == p ||
            (old_p->pre_handler == aggr_pre_handler &&
             p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
                /*
                 * This is the only probe left on the hash list.  Disarm it
                 * only if kprobes are enabled - otherwise, the breakpoint
                 * has already been removed and we save on flushing the
                 * icache.
                 */
                if (kprobe_enabled)
                        arch_disarm_kprobe(p);
                hlist_del_rcu(&old_p->hlist);
                cleanup_p = 1;
        } else {
                list_del_rcu(&p->list);
                cleanup_p = 0;
        }

        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        if (p->mod_refcounted) {
                mod = module_text_address((unsigned long)p->addr);
                if (mod)
                        module_put(mod);
        }

        if (cleanup_p) {
                if (p != old_p) {
                        list_del_rcu(&p->list);
                        kfree(old_p);
                }
                arch_remove_kprobe(p);
        } else {
                mutex_lock(&kprobe_mutex);
                if (p->break_handler)
                        old_p->break_handler = NULL;
                if (p->post_handler) {
                        list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                if (list_p->post_handler) {
                                        cleanup_p = 2;
                                        break;
                                }
                        }
                        if (cleanup_p == 0)
                                old_p->post_handler = NULL;
                }
                mutex_unlock(&kprobe_mutex);
        }
}

static struct notifier_block kprobe_exceptions_nb = {
        .notifier_call = kprobe_exceptions_notify,
        .priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
        return (unsigned long)entry;
}

int __kprobes register_jprobe(struct jprobe *jp)
{
        unsigned long addr = arch_deref_entry_point(jp->entry);

        if (!kernel_text_address(addr))
                return -EINVAL;

        /* TODO: verify that the probe point is a function entry point */
        jp->kp.pre_handler = setjmp_pre_handler;
        jp->kp.break_handler = longjmp_break_handler;

        return __register_kprobe(&jp->kp,
                (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
        unregister_kprobe(&jp->kp);
}
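
/*
 * Jprobe usage sketch (illustrative; names are placeholders): the
 * handler mirrors the probed function's signature so the arguments
 * arrive intact, and it must end with jprobe_return().
 *
 *      static long my_jdo_fork(unsigned long clone_flags,
 *                              unsigned long stack_start, ...)
 *      {
 *              printk(KERN_INFO "clone_flags = 0x%lx\n", clone_flags);
 *              jprobe_return();
 *              return 0;       (never reached)
 *      }
 *
 *      static struct jprobe my_jp = {
 *              .entry  = JPROBE_ENTRY(my_jdo_fork),
 *              .kp     = { .symbol_name = "do_fork" },
 *      };
 *
 *      ret = register_jprobe(&my_jp);
 */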

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
        unsigned long flags = 0;

        /* TODO: consider swapping the RA only after the last pre_handler fired */
        spin_lock_irqsave(&kretprobe_lock, flags);
        if (!hlist_empty(&rp->free_instances)) {
                struct kretprobe_instance *ri;

                ri = hlist_entry(rp->free_instances.first,
                                 struct kretprobe_instance, uflist);
                ri->rp = rp;
                ri->task = current;
                arch_prepare_kretprobe(ri, regs);

                /* XXX(hch): why is there no hlist_move_head? */
                hlist_del(&ri->uflist);
                hlist_add_head(&ri->uflist, &ri->rp->used_instances);
                hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
        } else
                rp->nmissed++;
        spin_unlock_irqrestore(&kretprobe_lock, flags);
        return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
        int ret = 0;
        struct kretprobe_instance *inst;
        int i;
        void *addr = rp->kp.addr;

        if (kretprobe_blacklist_size) {
                if (addr == NULL)
                        kprobe_lookup_name(rp->kp.symbol_name, addr);
                addr += rp->kp.offset;

                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        if (kretprobe_blacklist[i].addr == addr)
                                return -EINVAL;
                }
        }

        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
        rp->kp.break_handler = NULL;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
                rp->maxactive = max(10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }
        INIT_HLIST_HEAD(&rp->used_instances);
        INIT_HLIST_HEAD(&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
                if (inst == NULL) {
                        free_rp_inst(rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        rp->nmissed = 0;
        /* Establish function entry probe point */
        if ((ret = __register_kprobe(&rp->kp,
                (unsigned long)__builtin_return_address(0))) != 0)
                free_rp_inst(rp);
        return ret;
}
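
/*
 * Kretprobe usage sketch (illustrative; names are placeholders). The
 * handler runs when the probed function returns; the return value can
 * be read from regs in an arch-specific way:
 *
 *      static int my_ret_handler(struct kretprobe_instance *ri,
 *                                struct pt_regs *regs)
 *      {
 *              return 0;
 *      }
 *
 *      static struct kretprobe my_rp = {
 *              .handler        = my_ret_handler,
 *              .maxactive      = 20,   (bound on concurrent instances)
 *              .kp             = { .symbol_name = "do_fork" },
 *      };
 *
 *      ret = register_kretprobe(&my_rp);
 */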

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
        return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
        unsigned long flags;
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;

        unregister_kprobe(&rp->kp);

        /* No race here */
        spin_lock_irqsave(&kretprobe_lock, flags);
        hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
                ri->rp = NULL;
                hlist_del(&ri->uflist);
        }
        spin_unlock_irqrestore(&kretprobe_lock, flags);
        free_rp_inst(rp);
}

static int __init init_kprobes(void)
{
        int i, err = 0;

        /* FIXME allocate the probe table, currently defined statically */
        /* initialize all list heads */
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                INIT_HLIST_HEAD(&kprobe_table[i]);
                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
        }

        if (kretprobe_blacklist_size) {
                /* lookup the function address from its name */
                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        kprobe_lookup_name(kretprobe_blacklist[i].name,
                                           kretprobe_blacklist[i].addr);
                        if (!kretprobe_blacklist[i].addr)
                                printk("kretprobe: lookup failed: %s\n",
                                       kretprobe_blacklist[i].name);
                }
        }

        /* By default, kprobes are enabled */
        kprobe_enabled = true;

        err = arch_init_kprobes();
        if (!err)
                err = register_die_notifier(&kprobe_exceptions_nb);

        if (!err)
                init_test_probes();
        return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
                const char *sym, int offset, char *modname)
{
        char *kprobe_type;

        if (p->pre_handler == pre_handler_kretprobe)
                kprobe_type = "r";
        else if (p->pre_handler == setjmp_pre_handler)
                kprobe_type = "j";
        else
                kprobe_type = "k";
        if (sym)
                seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
                        sym, offset, (modname ? modname : " "));
        else
                seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
        (*pos)++;
        if (*pos >= KPROBE_TABLE_SIZE)
                return NULL;
        return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
        /* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p, *kp;
        const char *sym = NULL;
        unsigned int i = *(loff_t *) v;
        unsigned long offset = 0;
        char *modname, namebuf[128];

        head = &kprobe_table[i];
        preempt_disable();
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                sym = kallsyms_lookup((unsigned long)p->addr, NULL,
                                        &offset, &modname, namebuf);
                if (p->pre_handler == aggr_pre_handler) {
                        list_for_each_entry_rcu(kp, &p->list, list)
                                report_probe(pi, kp, sym, offset, modname);
                } else
                        report_probe(pi, p, sym, offset, modname);
        }
        preempt_enable();
        return 0;
}

static struct seq_operations kprobes_seq_ops = {
        .start = kprobe_seq_start,
        .next  = kprobe_seq_next,
        .stop  = kprobe_seq_stop,
        .show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
        .open           = kprobes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static void __kprobes enable_all_kprobes(void)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);

        /* If kprobes are already enabled, just return */
        if (kprobe_enabled)
                goto already_enabled;

        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        arch_arm_kprobe(p);
        }

        kprobe_enabled = true;
        printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
        mutex_unlock(&kprobe_mutex);
        return;
}

static void __kprobes disable_all_kprobes(void)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);

        /* If kprobes are already disabled, just return */
        if (!kprobe_enabled)
                goto already_disabled;

        kprobe_enabled = false;
        printk(KERN_INFO "Kprobes globally disabled\n");
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist) {
                        if (!arch_trampoline_kprobe(p))
                                arch_disarm_kprobe(p);
                }
        }

        mutex_unlock(&kprobe_mutex);
        /* Allow all currently running kprobes to complete */
        synchronize_sched();
        return;

already_disabled:
        mutex_unlock(&kprobe_mutex);
        return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
               char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[3];

        if (kprobe_enabled)
                buf[0] = '1';
        else
                buf[0] = '0';
        buf[1] = '\n';
        buf[2] = 0x00;
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
               const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        int buf_size;

        buf_size = min(count, (sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        switch (buf[0]) {
        case 'y':
        case 'Y':
        case '1':
                enable_all_kprobes();
                break;
        case 'n':
        case 'N':
        case '0':
                disable_all_kprobes();
                break;
        }

        return count;
}

static struct file_operations fops_kp = {
        .read =         read_enabled_file_bool,
        .write =        write_enabled_file_bool,
};
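
/*
 * Userspace view (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *      # cat /sys/kernel/debug/kprobes/list
 *      # echo 0 > /sys/kernel/debug/kprobes/enabled    (disarm all probes)
 *      # echo 1 > /sys/kernel/debug/kprobes/enabled    (re-arm them)
 */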

static int __kprobes debugfs_kprobe_init(void)
{
        struct dentry *dir, *file;
        unsigned int value = 1;

        dir = debugfs_create_dir("kprobes", NULL);
        if (!dir)
                return -ENOMEM;

        file = debugfs_create_file("list", 0444, dir, NULL,
                                &debugfs_kprobes_operations);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        file = debugfs_create_file("enabled", 0600, dir,
                                        &value, &fops_kp);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(jprobe_return);
#endif

#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
#endif