/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *              Probes initial implementation (includes suggestions from
 *              Rusty Russell).
 * 2004-Aug     Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *              hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
 * 2004-Sep     Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *              exceptions notifier to be first on the priority list.
 * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *              <prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures, like 64-bit powerpc, have function
 * descriptors, so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
        addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);             /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);        /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped.  x86_64 and POWER4 and above have no-exec support,
 * so single-stepping an instruction on a vmalloc'ed/kmalloc'ed/data
 * page is a recipe for disaster.
 */
#define INSNS_PER_PAGE  (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
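/*
 * Worked example (illustrative only): assuming a 4096-byte page, a
 * 16-byte MAX_INSN_SIZE and a one-byte kprobe_opcode_t, as on x86 of
 * this era, this comes to 4096 / (16 * 1) = 256 slots per page.  The
 * real figures are architecture-specific.
 */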

struct kprobe_insn_page {
        struct hlist_node hlist;
        kprobe_opcode_t *insns;         /* Page of instruction slots */
        char slot_used[INSNS_PER_PAGE];
        int nused;
        int ngarbage;
};

enum kprobe_slot_state {
        SLOT_CLEAN = 0,
        SLOT_DIRTY = 1,
        SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
        int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
        ret = freeze_processes();
        if (ret == 0) {
                struct task_struct *p, *q;
                do_each_thread(p, q) {
                        if (p != current && p->state == TASK_RUNNING &&
                            p->pid != 0) {
                                printk(KERN_ERR "Check failed: %s is running\n",
                                       p->comm);
                                ret = -1;
                                goto loop_end;
                        }
                } while_each_thread(p, q);
        }
loop_end:
        thaw_processes();
#else
        synchronize_sched();
#endif
        return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos;

 retry:
        hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
                if (kip->nused < INSNS_PER_PAGE) {
                        int i;
                        for (i = 0; i < INSNS_PER_PAGE; i++) {
                                if (kip->slot_used[i] == SLOT_CLEAN) {
                                        kip->slot_used[i] = SLOT_USED;
                                        kip->nused++;
                                        return kip->insns + (i * MAX_INSN_SIZE);
                                }
                        }
                        /* Surprise!  No unused slots.  Fix kip->nused. */
                        kip->nused = INSNS_PER_PAGE;
                }
        }

        /* If there are any garbage slots, collect them and try again. */
        if (kprobe_garbage_slots && collect_garbage_slots() == 0)
                goto retry;

        /* All out of space.  Need to allocate a new page. Use slot 0. */
        kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
        if (!kip)
                return NULL;

        /*
         * Use module_alloc so this page is within +/- 2GB of where the
         * kernel image and loaded module images reside. This is required
         * so x86_64 can correctly handle the %rip-relative fixups.
         */
        kip->insns = module_alloc(PAGE_SIZE);
        if (!kip->insns) {
                kfree(kip);
                return NULL;
        }
        INIT_HLIST_NODE(&kip->hlist);
        hlist_add_head(&kip->hlist, &kprobe_insn_pages);
        memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
        kip->slot_used[0] = SLOT_USED;
        kip->nused = 1;
        kip->ngarbage = 0;
        return kip->insns;
}

/* Return 1 if the slot's page became empty (and was freed or recycled),
 * otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
        kip->slot_used[idx] = SLOT_CLEAN;
        kip->nused--;
        if (kip->nused == 0) {
                /*
                 * Page is no longer in use.  Free it unless
                 * it's the last one.  We keep the last one
                 * so as not to have to set it up again the
                 * next time somebody inserts a probe.
                 */
                hlist_del(&kip->hlist);
                if (hlist_empty(&kprobe_insn_pages)) {
                        INIT_HLIST_NODE(&kip->hlist);
                        hlist_add_head(&kip->hlist,
                                       &kprobe_insn_pages);
                } else {
                        module_free(NULL, kip->insns);
                        kfree(kip);
                }
                return 1;
        }
        return 0;
}

static int __kprobes collect_garbage_slots(void)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos, *next;

        /* Ensure no one is preempted in the middle of a garbage slot */
        if (check_safety() != 0)
                return -EAGAIN;

        hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
                int i;
                if (kip->ngarbage == 0)
                        continue;
                kip->ngarbage = 0;      /* we will collect all garbage slots */
                for (i = 0; i < INSNS_PER_PAGE; i++) {
                        if (kip->slot_used[i] == SLOT_DIRTY &&
                            collect_one_slot(kip, i))
                                break;
                }
        }
        kprobe_garbage_slots = 0;
        return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos;

        hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
                if (kip->insns <= slot &&
                    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
                        int i = (slot - kip->insns) / MAX_INSN_SIZE;
                        if (dirty) {
                                kip->slot_used[i] = SLOT_DIRTY;
                                kip->ngarbage++;
                        } else {
                                collect_one_slot(kip, i);
                        }
                        break;
                }
        }

        if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
                collect_garbage_slots();
}
#endif
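
/*
 * Illustrative sketch (guarded out, not built): roughly how an
 * architecture that defines __ARCH_WANT_KPROBES_INSN_SLOT pairs the
 * helpers above.  The real code lives in arch/<arch>/kernel/kprobes.c
 * and differs in detail; the bodies below are assumptions, not the
 * actual per-arch implementation.
 */
#if 0
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        /* Reserve an executable slot and copy the probed instruction. */
        p->ainsn.insn = get_insn_slot();
        if (!p->ainsn.insn)
                return -ENOMEM;
        memcpy(p->ainsn.insn, p->addr,
               MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
        return 0;
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        /* Hand the slot back; SLOT_DIRTY defers the free to the GC. */
        mutex_lock(&kprobe_mutex);
        free_insn_slot(p->ainsn.insn, 1);
        mutex_unlock(&kprobe_mutex);
}
#endif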

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
        __get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
        __get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *      - under the kprobe_mutex - during kprobe_[un]register()
 *                              OR
 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr)
                        return p;
        }
        return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler) {
                        set_kprobe_instance(kp);
                        if (kp->pre_handler(kp, regs))
                                return 1;
                }
                reset_kprobe_instance();
        }
        return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
                                        unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler) {
                        set_kprobe_instance(kp);
                        kp->post_handler(kp, regs, flags);
                        reset_kprobe_instance();
                }
        }
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                        int trapnr)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);

        /*
         * If we faulted "during" the execution of a user-specified
         * probe handler, invoke just that probe's fault handler.
         */
        if (cur && cur->fault_handler) {
                if (cur->fault_handler(cur, regs, trapnr))
                        return 1;
        }
        return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);
        int ret = 0;

        if (cur && cur->break_handler) {
                if (cur->break_handler(cur, regs))
                        ret = 1;
        }
        reset_kprobe_instance();
        return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
        struct kprobe *kp;
        if (p->pre_handler != aggr_pre_handler) {
                p->nmissed++;
        } else {
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->nmissed++;
        }
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
                                struct hlist_head *head)
{
        /* remove rp inst off the kretprobe_inst_table */
        hlist_del(&ri->hlist);
        if (ri->rp) {
                /* remove rp inst off the used list */
                hlist_del(&ri->uflist);
                /* put rp inst back onto the free list */
                INIT_HLIST_NODE(&ri->uflist);
                hlist_add_head(&ri->uflist, &ri->rp->free_instances);
        } else
                /* Unregistering */
                hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
        return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task.  These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long flags = 0;

        INIT_HLIST_HEAD(&empty_rp);
        spin_lock_irqsave(&kretprobe_lock, flags);
        head = kretprobe_inst_table_head(tk);
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task == tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        spin_unlock_irqrestore(&kretprobe_lock, flags);

        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}

static inline void free_rp_inst(struct kretprobe *rp)
{
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;

        hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
                hlist_del(&ri->uflist);
                kfree(ri);
        }
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
        memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list.  Fail if this is the
 * second jprobe at the address; two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        if (p->break_handler) {
                if (old_p->break_handler)
                        return -EEXIST;
                list_add_tail_rcu(&p->list, &old_p->list);
                old_p->break_handler = aggr_break_handler;
        } else
                list_add_rcu(&p->list, &old_p->list);
        if (p->post_handler && !old_p->post_handler)
                old_p->post_handler = aggr_post_handler;
        return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
        copy_kprobe(p, ap);
        flush_insn_slot(ap);
        ap->addr = p->addr;
        ap->pre_handler = aggr_pre_handler;
        ap->fault_handler = aggr_fault_handler;
        if (p->post_handler)
                ap->post_handler = aggr_post_handler;
        if (p->break_handler)
                ap->break_handler = aggr_break_handler;

        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                                          struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap;

        if (old_p->pre_handler == aggr_pre_handler) {
                copy_kprobe(old_p, p);
                ret = add_new_kprobe(old_p, p);
        } else {
                ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
                if (!ap)
                        return -ENOMEM;
                add_aggr_kprobe(ap, old_p);
                copy_kprobe(ap, p);
                ret = add_new_kprobe(ap, p);
        }
        return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
        if (addr >= (unsigned long)__kprobes_text_start &&
            addr < (unsigned long)__kprobes_text_end)
                return -EINVAL;
        return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
        kprobe_opcode_t *addr = p->addr;
        if (p->symbol_name) {
                if (addr)
                        return NULL;
                kprobe_lookup_name(p->symbol_name, addr);
        }

        if (!addr)
                return NULL;
        return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

static int __kprobes __register_kprobe(struct kprobe *p,
        unsigned long called_from)
{
        int ret = 0;
        struct kprobe *old_p;
        struct module *probed_mod;
        kprobe_opcode_t *addr;

        addr = kprobe_addr(p);
        if (!addr)
                return -EINVAL;
        p->addr = addr;

        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr))
                return -EINVAL;

        p->mod_refcounted = 0;

        /*
         * Check if we are probing a module.
         */
        probed_mod = module_text_address((unsigned long) p->addr);
        if (probed_mod) {
                struct module *calling_mod = module_text_address(called_from);
                /*
                 * We must allow modules to probe themselves and in this
                 * case avoid incrementing the module refcount, so as to
                 * allow unloading of self-probing modules.
                 */
                if (calling_mod && calling_mod != probed_mod) {
                        if (unlikely(!try_module_get(probed_mod)))
                                return -EINVAL;
                        p->mod_refcounted = 1;
                } else
                        probed_mod = NULL;
        }

        p->nmissed = 0;
        mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
        if (old_p) {
                ret = register_aggr_kprobe(old_p, p);
                goto out;
        }

        ret = arch_prepare_kprobe(p);
        if (ret)
                goto out;

        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist,
                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

        if (kprobe_enabled)
                arch_arm_kprobe(p);

out:
        mutex_unlock(&kprobe_mutex);

        if (ret && probed_mod)
                module_put(probed_mod);
        return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
        return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
}
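
/*
 * Illustrative sketch (guarded out, not built): a minimal module using
 * register_kprobe().  The probed symbol "do_fork", the handler name and
 * the message are examples only; any kernel text address outside the
 * kprobes-protected sections can be probed.
 */
#if 0
static int example_pre(struct kprobe *kp, struct pt_regs *regs)
{
        printk(KERN_INFO "kprobe hit at %p\n", kp->addr);
        return 0;       /* 0 == let the probed instruction execute */
}

static struct kprobe example_kp = {
        .symbol_name    = "do_fork",    /* resolved via kprobe_addr() */
        .pre_handler    = example_pre,
};

static int __init example_init(void)
{
        return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
        unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif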

void __kprobes unregister_kprobe(struct kprobe *p)
{
        struct module *mod;
        struct kprobe *old_p, *list_p;
        int cleanup_p;

        mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
        if (unlikely(!old_p)) {
                mutex_unlock(&kprobe_mutex);
                return;
        }
        if (p != old_p) {
                list_for_each_entry_rcu(list_p, &old_p->list, list)
                        if (list_p == p)
                                /* kprobe p is a valid probe */
                                goto valid_p;
                mutex_unlock(&kprobe_mutex);
                return;
        }
valid_p:
        if (old_p == p ||
            (old_p->pre_handler == aggr_pre_handler &&
             p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
                /*
                 * Only probe on the hash list. Disarm only if kprobes are
                 * enabled - otherwise, the breakpoint would already have
                 * been removed. We save on flushing icache.
                 */
                if (kprobe_enabled)
                        arch_disarm_kprobe(p);
                hlist_del_rcu(&old_p->hlist);
                cleanup_p = 1;
        } else {
                list_del_rcu(&p->list);
                cleanup_p = 0;
        }

        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        if (p->mod_refcounted) {
                mod = module_text_address((unsigned long)p->addr);
                if (mod)
                        module_put(mod);
        }

        if (cleanup_p) {
                if (p != old_p) {
                        list_del_rcu(&p->list);
                        kfree(old_p);
                }
                arch_remove_kprobe(p);
        } else {
                mutex_lock(&kprobe_mutex);
                if (p->break_handler)
                        old_p->break_handler = NULL;
                if (p->post_handler) {
                        list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                if (list_p->post_handler) {
                                        cleanup_p = 2;
                                        break;
                                }
                        }
                        if (cleanup_p == 0)
                                old_p->post_handler = NULL;
                }
                mutex_unlock(&kprobe_mutex);
        }
}

static struct notifier_block kprobe_exceptions_nb = {
        .notifier_call = kprobe_exceptions_notify,
        .priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
        return (unsigned long)entry;
}

int __kprobes register_jprobe(struct jprobe *jp)
{
        unsigned long addr = arch_deref_entry_point(jp->entry);

        if (!kernel_text_address(addr))
                return -EINVAL;

        /* TODO: Verify probepoint is a function entry point */
        jp->kp.pre_handler = setjmp_pre_handler;
        jp->kp.break_handler = longjmp_break_handler;

        return __register_kprobe(&jp->kp,
                (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
        unregister_kprobe(&jp->kp);
}
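
/*
 * Illustrative sketch (guarded out, not built): a jprobe handler must
 * mirror the probed function's signature and finish with
 * jprobe_return().  "do_fork" and its argument list are assumptions
 * based on the prototype of this era; adjust for the actual target.
 */
#if 0
static long example_jdo_fork(unsigned long clone_flags,
                             unsigned long stack_start,
                             struct pt_regs *regs,
                             unsigned long stack_size,
                             int __user *parent_tidptr,
                             int __user *child_tidptr)
{
        printk(KERN_INFO "do_fork: clone_flags = 0x%lx\n", clone_flags);
        jprobe_return();        /* mandatory; never returns normally */
        return 0;
}

static struct jprobe example_jp = {
        .entry          = JPROBE_ENTRY(example_jdo_fork),
        .kp.symbol_name = "do_fork",
};

/* register_jprobe(&example_jp) at init, unregister_jprobe() at exit. */
#endif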

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe.  When
 * the probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
        unsigned long flags = 0;

        /* TODO: consider swapping the RA only after the last pre_handler
         * has fired */
        spin_lock_irqsave(&kretprobe_lock, flags);
        if (!hlist_empty(&rp->free_instances)) {
                struct kretprobe_instance *ri;

                ri = hlist_entry(rp->free_instances.first,
                                 struct kretprobe_instance, uflist);
                ri->rp = rp;
                ri->task = current;

                if (rp->entry_handler && rp->entry_handler(ri, regs)) {
                        spin_unlock_irqrestore(&kretprobe_lock, flags);
                        return 0;
                }

                arch_prepare_kretprobe(ri, regs);

                /* XXX(hch): why is there no hlist_move_head? */
                hlist_del(&ri->uflist);
                hlist_add_head(&ri->uflist, &ri->rp->used_instances);
                hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
        } else
                rp->nmissed++;
        spin_unlock_irqrestore(&kretprobe_lock, flags);
        return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
        int ret = 0;
        struct kretprobe_instance *inst;
        int i;
        void *addr;

        if (kretprobe_blacklist_size) {
                addr = kprobe_addr(&rp->kp);
                if (!addr)
                        return -EINVAL;

                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        if (kretprobe_blacklist[i].addr == addr)
                                return -EINVAL;
                }
        }

        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
        rp->kp.break_handler = NULL;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
                rp->maxactive = max(10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }
        INIT_HLIST_HEAD(&rp->used_instances);
        INIT_HLIST_HEAD(&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(struct kretprobe_instance) +
                               rp->data_size, GFP_KERNEL);
                if (inst == NULL) {
                        free_rp_inst(rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        rp->nmissed = 0;
        /* Establish function entry probe point */
        ret = __register_kprobe(&rp->kp,
                (unsigned long)__builtin_return_address(0));
        if (ret != 0)
                free_rp_inst(rp);
        return ret;
}

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
        return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        return 0;
}
#endif /* CONFIG_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
        unsigned long flags;
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;

        unregister_kprobe(&rp->kp);

        /* No race here */
        spin_lock_irqsave(&kretprobe_lock, flags);
        hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
                ri->rp = NULL;
                hlist_del(&ri->uflist);
        }
        spin_unlock_irqrestore(&kretprobe_lock, flags);
        free_rp_inst(rp);
}
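
/*
 * Illustrative sketch (guarded out, not built): a return probe that
 * fires when the probed function returns.  "sys_open" and the maxactive
 * value are examples only; per-instance scratch space of data_size
 * bytes, if requested, lives right after the kretprobe_instance.
 */
#if 0
static int example_ret_handler(struct kretprobe_instance *ri,
                               struct pt_regs *regs)
{
        printk(KERN_INFO "probed function returned to %p\n", ri->ret_addr);
        return 0;
}

static struct kretprobe example_rp = {
        .handler        = example_ret_handler,
        .maxactive      = 20,   /* instances pre-allocated at register time */
        .kp.symbol_name = "sys_open",
};

/* register_kretprobe(&example_rp) at init, unregister_kretprobe() at exit. */
#endif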

static int __init init_kprobes(void)
{
        int i, err = 0;

        /* FIXME allocate the probe table, currently defined statically */
        /* initialize all list heads */
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                INIT_HLIST_HEAD(&kprobe_table[i]);
                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
        }

        if (kretprobe_blacklist_size) {
                /* lookup the function address from its name */
                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        kprobe_lookup_name(kretprobe_blacklist[i].name,
                                           kretprobe_blacklist[i].addr);
                        if (!kretprobe_blacklist[i].addr)
                                printk(KERN_WARNING
                                       "kretprobe: lookup failed: %s\n",
                                       kretprobe_blacklist[i].name);
                }
        }

        /* By default, kprobes are enabled */
        kprobe_enabled = true;

        err = arch_init_kprobes();
        if (!err)
                err = register_die_notifier(&kprobe_exceptions_nb);

        if (!err)
                init_test_probes();
        return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
                const char *sym, int offset, char *modname)
{
        const char *kprobe_type;

        if (p->pre_handler == pre_handler_kretprobe)
                kprobe_type = "r";
        else if (p->pre_handler == setjmp_pre_handler)
                kprobe_type = "j";
        else
                kprobe_type = "k";
        if (sym)
                seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
                        sym, offset, (modname ? modname : " "));
        else
                seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
        (*pos)++;
        if (*pos >= KPROBE_TABLE_SIZE)
                return NULL;
        return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
        /* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p, *kp;
        const char *sym = NULL;
        unsigned int i = *(loff_t *) v;
        unsigned long offset = 0;
        char *modname, namebuf[128];

        head = &kprobe_table[i];
        preempt_disable();
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                sym = kallsyms_lookup((unsigned long)p->addr, NULL,
                                        &offset, &modname, namebuf);
                if (p->pre_handler == aggr_pre_handler) {
                        list_for_each_entry_rcu(kp, &p->list, list)
                                report_probe(pi, kp, sym, offset, modname);
                } else
                        report_probe(pi, p, sym, offset, modname);
        }
        preempt_enable();
        return 0;
}

static struct seq_operations kprobes_seq_ops = {
        .start = kprobe_seq_start,
        .next  = kprobe_seq_next,
        .stop  = kprobe_seq_stop,
        .show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
        .open           = kprobes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static void __kprobes enable_all_kprobes(void)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);

        /* If kprobes are already enabled, just return */
        if (kprobe_enabled)
                goto already_enabled;

        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        arch_arm_kprobe(p);
        }

        kprobe_enabled = true;
        printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
        mutex_unlock(&kprobe_mutex);
        return;
}

static void __kprobes disable_all_kprobes(void)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);

        /* If kprobes are already disabled, just return */
        if (!kprobe_enabled)
                goto already_disabled;

        kprobe_enabled = false;
        printk(KERN_INFO "Kprobes globally disabled\n");
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist) {
                        if (!arch_trampoline_kprobe(p))
                                arch_disarm_kprobe(p);
                }
        }

        mutex_unlock(&kprobe_mutex);
        /* Allow all currently running kprobes to complete */
        synchronize_sched();
        return;

already_disabled:
        mutex_unlock(&kprobe_mutex);
        return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched.  We can reuse that facility when
 * it becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
               char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[3];

        if (kprobe_enabled)
                buf[0] = '1';
        else
                buf[0] = '0';
        buf[1] = '\n';
        buf[2] = 0x00;
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
               const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        int buf_size;

        buf_size = min(count, (sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        switch (buf[0]) {
        case 'y':
        case 'Y':
        case '1':
                enable_all_kprobes();
                break;
        case 'n':
        case 'N':
        case '0':
                disable_all_kprobes();
                break;
        }

        return count;
}

static struct file_operations fops_kp = {
        .read =         read_enabled_file_bool,
        .write =        write_enabled_file_bool,
};
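
/*
 * Usage sketch: with debugfs mounted (paths below assume the
 * conventional /sys/kernel/debug mount point), the probe list and the
 * global enable switch are reachable from userspace:
 *
 *      cat /sys/kernel/debug/kprobes/list
 *      echo 0 > /sys/kernel/debug/kprobes/enabled      # disarm all probes
 *      echo 1 > /sys/kernel/debug/kprobes/enabled      # re-arm them
 */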

static int __kprobes debugfs_kprobe_init(void)
{
        struct dentry *dir, *file;
        unsigned int value = 1;

        dir = debugfs_create_dir("kprobes", NULL);
        if (!dir)
                return -ENOMEM;

        file = debugfs_create_file("list", 0444, dir, NULL,
                                &debugfs_kprobes_operations);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        file = debugfs_create_file("enabled", 0600, dir,
                                        &value, &fops_kp);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(jprobe_return);
#endif

#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
#endif