/*
 *  Kernel Probes (KProbes)
 *  arch/ia64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Intel Corporation, 2005
 *
 * 2005-Apr     Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
 *              <anil.s.keshavamurthy@intel.com> adapted from i386
 */

#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/moduleloader.h>

#include <asm/pgtable.h>
#include <asm/kdebug.h>

extern void jprobe_inst_return(void);

/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE       0x00000001
#define KPROBE_HIT_SS           0x00000002

static struct kprobe *current_kprobe, *kprobe_prev;
static unsigned long kprobe_status, kprobe_status_prev;
static struct pt_regs jprobe_saved_regs;

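/*
 * An IA-64 bundle is 128 bits wide: a 5-bit template field followed by
 * three 41-bit instruction slots.  This table maps the template value to
 * the execution-unit type (M, I, F, B, L, X) of each slot; 'u' marks
 * templates that are undefined/reserved.
 */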
enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
  { M, I, I },                          /* 00 */
  { M, I, I },                          /* 01 */
  { M, I, I },                          /* 02 */
  { M, I, I },                          /* 03 */
  { M, L, X },                          /* 04 */
  { M, L, X },                          /* 05 */
  { u, u, u },                          /* 06 */
  { u, u, u },                          /* 07 */
  { M, M, I },                          /* 08 */
  { M, M, I },                          /* 09 */
  { M, M, I },                          /* 0A */
  { M, M, I },                          /* 0B */
  { M, F, I },                          /* 0C */
  { M, F, I },                          /* 0D */
  { M, M, F },                          /* 0E */
  { M, M, F },                          /* 0F */
  { M, I, B },                          /* 10 */
  { M, I, B },                          /* 11 */
  { M, B, B },                          /* 12 */
  { M, B, B },                          /* 13 */
  { u, u, u },                          /* 14 */
  { u, u, u },                          /* 15 */
  { B, B, B },                          /* 16 */
  { B, B, B },                          /* 17 */
  { M, M, B },                          /* 18 */
  { M, M, B },                          /* 19 */
  { u, u, u },                          /* 1A */
  { u, u, u },                          /* 1B */
  { M, F, B },                          /* 1C */
  { M, F, B },                          /* 1D */
  { u, u, u },                          /* 1E */
  { u, u, u },                          /* 1F */
};

/*
 * Check whether the instruction is an IP-relative instruction and
 * update the kprobe inst flags accordingly.
 */
static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode,
        unsigned long kprobe_inst, struct kprobe *p)
{
        p->ainsn.inst_flag = 0;
        p->ainsn.target_br_reg = 0;

        if (bundle_encoding[template][slot] == B) {
                switch (major_opcode) {
                  case INDIRECT_CALL_OPCODE:
                        p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
                        p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
                        break;
                  case IP_RELATIVE_PREDICT_OPCODE:
                  case IP_RELATIVE_BRANCH_OPCODE:
                        p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
                        break;
                  case IP_RELATIVE_CALL_OPCODE:
                        p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
                        p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
                        p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
                        break;
                }
        } else if (bundle_encoding[template][slot] == X) {
                switch (major_opcode) {
                  case LONG_CALL_OPCODE:
                        p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
                        p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
                        break;
                }
        }
        return;
}

/*
 * Check whether the instruction on which we are inserting the kprobe
 * is supported.
 * Returns 0 if supported
 * Returns -EINVAL if unsupported
 */
static int unsupported_inst(uint template, uint slot, uint major_opcode,
        unsigned long kprobe_inst, struct kprobe *p)
{
        unsigned long addr = (unsigned long)p->addr;

        if (bundle_encoding[template][slot] == I) {
                switch (major_opcode) {
                        case 0x0: /* I_UNIT_MISC_OPCODE */
                        /*
                         * Check for Integer speculation instruction
                         * - Bit 33-35 to be equal to 0x1
                         */
                        if (((kprobe_inst >> 33) & 0x7) == 1) {
                                printk(KERN_WARNING
                                        "Kprobes on speculation inst at <0x%lx> not supported\n",
                                        addr);
                                return -EINVAL;
                        }

                        /*
                         * IP relative mov instruction
                         *  - Bit 27-35 to be equal to 0x30
                         */
                        if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
                                printk(KERN_WARNING
                                        "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
                                        addr);
                                return -EINVAL;

                        }
                }
        }
        return 0;
}

/*
 * Check whether the instruction on which we are inserting the kprobe,
 *      (qp) cmpx.crel.ctype p1,p2=r2,r3
 * is a cmp instruction with ctype unc.
 */
static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode,
                unsigned long kprobe_inst)
{
        cmp_inst_t cmp_inst;
        uint ctype_unc = 0;

        if (!((bundle_encoding[template][slot] == I) ||
                (bundle_encoding[template][slot] == M)))
                goto out;

        if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
                (major_opcode == 0xE)))
                goto out;

        cmp_inst.l = kprobe_inst;
        if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
                /* Integer compare - Register Register (A6 type) */
                if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
                                && (cmp_inst.f.c == 1))
                        ctype_unc = 1;
        } else if ((cmp_inst.f.x2 == 2) || (cmp_inst.f.x2 == 3)) {
                /* Integer compare - Immediate Register (A8 type) */
                if ((cmp_inst.f.ta == 0) && (cmp_inst.f.c == 1))
                        ctype_unc = 1;
        }
out:
        return ctype_unc;
}

/*
 * In this function we overwrite the bundle with
 * the break instruction at the given slot.
 */
static void prepare_break_inst(uint template, uint slot, uint major_opcode,
        unsigned long kprobe_inst, struct kprobe *p)
{
        unsigned long break_inst = BREAK_INST;
        bundle_t *bundle = &p->ainsn.insn.bundle;

        /*
         * Copy the original kprobe_inst qualifying predicate (qp)
         * to the break instruction, unless it is a cmp instruction
         * with ctype unc: such an instruction must always be
         * executed regardless of its qp.
         */
        if (!is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst))
                break_inst |= (0x3f & kprobe_inst);

        switch (slot) {
          case 0:
                bundle->quad0.slot0 = break_inst;
                break;
          case 1:
                bundle->quad0.slot1_p0 = break_inst;
                bundle->quad1.slot1_p1 = break_inst >> (64-46);
                break;
          case 2:
                bundle->quad1.slot2 = break_inst;
                break;
        }

        /*
         * Update the instruction flag, so that we can
         * emulate the instruction properly after we
         * single step on the original instruction.
         */
        update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
}

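/*
 * Extract the 41-bit instruction and its major opcode from the given
 * slot of the bundle.  Slot 1 straddles the two 64-bit halves of the
 * bundle, so it is reassembled from both quads.
 */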
static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
                unsigned long *kprobe_inst, uint *major_opcode)
{
        unsigned long kprobe_inst_p0, kprobe_inst_p1;
        unsigned int template;

        template = bundle->quad0.template;

        switch (slot) {
          case 0:
                *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
                *kprobe_inst = bundle->quad0.slot0;
                break;
          case 1:
                *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
                kprobe_inst_p0 = bundle->quad0.slot1_p0;
                kprobe_inst_p1 = bundle->quad1.slot1_p1;
                *kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
                break;
          case 2:
                *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
                *kprobe_inst = bundle->quad1.slot2;
                break;
        }
}

static int valid_kprobe_addr(int template, int slot, unsigned long addr)
{
        if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
                printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n",
                                addr);
                return -EINVAL;
        }
        return 0;
}

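/*
 * Helpers for saving and restoring the active kprobe state, used when a
 * new probe is hit while another one is already being handled (see the
 * reentrancy path in pre_kprobes_handler()).
 */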
static inline void save_previous_kprobe(void)
{
        kprobe_prev = current_kprobe;
        kprobe_status_prev = kprobe_status;
}

static inline void restore_previous_kprobe(void)
{
        current_kprobe = kprobe_prev;
        kprobe_status = kprobe_status_prev;
}

static inline void set_current_kprobe(struct kprobe *p)
{
        current_kprobe = p;
}

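/*
 * Prepare the kprobe: save the original bundle in p->opcode (used both
 * for single-stepping and for disarming), and build the bundle that will
 * be armed in p->ainsn.insn with the probed slot replaced by a break
 * instruction.
 */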
int arch_prepare_kprobe(struct kprobe *p)
{
        unsigned long addr = (unsigned long) p->addr;
        unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
        unsigned long kprobe_inst = 0;
        unsigned int slot = addr & 0xf, template, major_opcode = 0;
        bundle_t *bundle = &p->ainsn.insn.bundle;

        memcpy(&p->opcode.bundle, kprobe_addr, sizeof(bundle_t));
        memcpy(&p->ainsn.insn.bundle, kprobe_addr, sizeof(bundle_t));

        template = bundle->quad0.template;

        if (valid_kprobe_addr(template, slot, addr))
                return -EINVAL;

        /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
        if (slot == 1 && bundle_encoding[template][1] == L)
                slot++;

        /* Get kprobe_inst and major_opcode from the bundle */
        get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

        if (unsupported_inst(template, slot, major_opcode, kprobe_inst, p))
                return -EINVAL;

        prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);

        return 0;
}

void arch_arm_kprobe(struct kprobe *p)
{
        unsigned long addr = (unsigned long)p->addr;
        unsigned long arm_addr = addr & ~0xFULL;

        memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
        flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
}

void arch_disarm_kprobe(struct kprobe *p)
{
        unsigned long addr = (unsigned long)p->addr;
        unsigned long arm_addr = addr & ~0xFULL;

        /* p->opcode contains the original unaltered bundle */
        memcpy((char *) arm_addr, (char *) &p->opcode.bundle, sizeof(bundle_t));
        flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
}

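/*
 * Nothing to free here: the copied bundles live inside struct kprobe
 * itself, so no separate instruction slot was allocated.
 */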
void arch_remove_kprobe(struct kprobe *p)
{
}

/*
 * We are resuming execution after a single step fault, so the pt_regs
 * structure reflects the register state after we executed the instruction
 * located in the kprobe (p->ainsn.insn.bundle).  We still need to adjust
 * the IP so that it points back to the original probed address, and
 * handle the cases where we need to fix up a relative IP address and/or
 * a branch register.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
        unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
        unsigned long template;
        int slot = ((unsigned long)p->addr & 0xf);

        template = p->opcode.bundle.quad0.template;

        if (slot == 1 && bundle_encoding[template][1] == L)
                slot = 2;

        if (p->ainsn.inst_flag) {

                if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
                        /* Fix relative IP address */
                        regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr;
                }

                if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
                        /*
                         * Fix the target branch register: the software
                         * convention is to use b0, b6 or b7, so only those
                         * registers are checked.
                         */
                        switch (p->ainsn.target_br_reg) {
                        case 0:
                                if ((regs->b0 == bundle_addr) ||
                                        (regs->b0 == bundle_addr + 0x10)) {
                                        regs->b0 = (regs->b0 - bundle_addr) +
                                                resume_addr;
                                }
                                break;
                        case 6:
                                if ((regs->b6 == bundle_addr) ||
                                        (regs->b6 == bundle_addr + 0x10)) {
                                        regs->b6 = (regs->b6 - bundle_addr) +
                                                resume_addr;
                                }
                                break;
                        case 7:
                                if ((regs->b7 == bundle_addr) ||
                                        (regs->b7 == bundle_addr + 0x10)) {
                                        regs->b7 = (regs->b7 - bundle_addr) +
                                                resume_addr;
                                }
                                break;
                        } /* end switch */
                }
                goto turn_ss_off;
        }

        if (slot == 2) {
                if (regs->cr_iip == bundle_addr + 0x10) {
                        regs->cr_iip = resume_addr + 0x10;
                }
        } else {
                if (regs->cr_iip == bundle_addr) {
                        regs->cr_iip = resume_addr;
                }
        }

turn_ss_off:
        /* Turn off Single Step bit */
        ia64_psr(regs)->ss = 0;
}

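/*
 * Point the instruction pointer at the saved copy of the original bundle
 * and set PSR.ss so that the probed instruction is single-stepped out of
 * line.
 */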
static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
        unsigned long slot = (unsigned long)p->addr & 0xf;

        /* Update instruction pointer (IIP) and slot number (IPSR.ri) */
        regs->cr_iip = bundle_addr & ~0xFULL;

        if (slot > 2)
                slot = 0;

        ia64_psr(regs)->ri = slot;

        /* turn on single stepping */
        ia64_psr(regs)->ss = 1;
}

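/*
 * Break-fault entry point, called from the DIE_BREAK notifier.  Handles
 * reentrant probe hits, the jprobe break, and normal probe hits; on a
 * hit it runs the user pre_handler and sets up single-stepping.
 */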
static int pre_kprobes_handler(struct die_args *args)
{
        struct kprobe *p;
        int ret = 0;
        struct pt_regs *regs = args->regs;
        kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);

        preempt_disable();

        /* Handle recursion cases */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kprobe_status == KPROBE_HIT_SS) {
                                unlock_kprobes();
                                goto no_kprobe;
                        }
                        /* We have reentered pre_kprobes_handler(), since
                         * another probe was hit while within the handler.
                         * Here we save the original kprobe variables and
                         * just single step on the instruction of the new probe
                         * without calling any user handlers.
                         */
                        save_previous_kprobe();
                        set_current_kprobe(p);
                        p->nmissed++;
                        prepare_ss(p, regs);
                        kprobe_status = KPROBE_REENTER;
                        return 1;
                } else if (args->err == __IA64_BREAK_JPROBE) {
                        /*
                         * The jprobe-instrumented function just completed.
                         */
                        p = current_kprobe;
                        if (p->break_handler && p->break_handler(p, regs)) {
                                goto ss_probe;
                        }
                } else {
                        /* Not our break */
                        goto no_kprobe;
                }
        }

        lock_kprobes();
        p = get_kprobe(addr);
        if (!p) {
                unlock_kprobes();
                goto no_kprobe;
        }

        kprobe_status = KPROBE_HIT_ACTIVE;
        set_current_kprobe(p);

        if (p->pre_handler && p->pre_handler(p, regs))
                /*
                 * Our pre-handler is specifically requesting that we just
                 * do a return.  This is handling the case where the
                 * pre-handler is really our special jprobe pre-handler.
                 */
                return 1;

ss_probe:
        prepare_ss(p, regs);
        kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

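/*
 * Called on the single-step trap (DIE_SS): run the user post_handler,
 * fix up the register state via resume_execution(), then release the
 * kprobe lock and re-enable preemption.
 */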
static int post_kprobes_handler(struct pt_regs *regs)
{
        if (!kprobe_running())
                return 0;

        if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
                kprobe_status = KPROBE_HIT_SSDONE;
                current_kprobe->post_handler(current_kprobe, regs, 0);
        }

        resume_execution(current_kprobe, regs);

        /* Restore the previously saved kprobe variables and continue. */
        if (kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe();
                goto out;
        }

        unlock_kprobes();

out:
        preempt_enable_no_resched();
        return 1;
}

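/*
 * Called via the DIE_PAGE_FAULT notifier when a fault occurs while a
 * probe is active: give the user fault_handler a chance first, and if
 * the fault happened while single-stepping, undo the single-step setup.
 */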
static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
{
        if (!kprobe_running())
                return 0;

        if (current_kprobe->fault_handler &&
            current_kprobe->fault_handler(current_kprobe, regs, trapnr))
                return 1;

        if (kprobe_status & KPROBE_HIT_SS) {
                resume_execution(current_kprobe, regs);
                unlock_kprobes();
                preempt_enable_no_resched();
        }

        return 0;
}

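/*
 * Top-level notifier: dispatch break, single-step and page-fault die
 * events to the handlers above.
 */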
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
                             void *data)
{
        struct die_args *args = (struct die_args *)data;

        switch (val) {
        case DIE_BREAK:
                if (pre_kprobes_handler(args))
                        return NOTIFY_STOP;
                break;
        case DIE_SS:
                if (post_kprobes_handler(args->regs))
                        return NOTIFY_STOP;
                break;
        case DIE_PAGE_FAULT:
                if (kprobes_fault_handler(args->regs, args->trapnr))
                        return NOTIFY_STOP;
        default:
                break;
        }
        return NOTIFY_DONE;
}

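/*
 * jprobe support: the pre-handler saves the register state and redirects
 * execution to the instrumented function, with the return branch register
 * pointing at jprobe_inst_return() (in jprobes.S) so that control comes
 * back to us; the break handler then restores the saved state.
 */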
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        unsigned long addr = ((struct fnptr *)(jp->entry))->ip;

        /* save architectural state */
        jprobe_saved_regs = *regs;

        /* after rfi, execute the jprobe instrumented function */
        regs->cr_iip = addr & ~0xFULL;
        ia64_psr(regs)->ri = addr & 0xf;
        regs->r1 = ((struct fnptr *)(jp->entry))->gp;

        /*
         * fix the return address to our jprobe_inst_return() function
         * in the jprobes.S file
         */
        regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;

        return 1;
}

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        *regs = jprobe_saved_regs;
        return 1;
}