/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2008  Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
        unsigned char *stack;

        stack = (unsigned char *)task_pt_regs(task);
        stack += offset;
        return (*((int *)stack));
}
/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
                                 unsigned long data)
{
        unsigned char *stack;

        stack = (unsigned char *)task_pt_regs(task);
        stack += offset;
        *(unsigned long *) stack = data;
        return 0;
}
void user_enable_single_step(struct task_struct *child)
{
        /* Next scheduling will set up UBC */
        if (child->thread.ubc_pc == 0)
                ubc_usercnt += 1;

        child->thread.ubc_pc = get_stack_long(child,
                                offsetof(struct pt_regs, pc));

        set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
{
        clear_tsk_thread_flag(child, TIF_SINGLESTEP);

        /*
         * Ensure the UBC is not programmed at the next context switch.
         *
         * Normally this is not needed but there are sequences such as
         * singlestep, signal delivery, and continue that leave the
         * ubc_pc non-zero leading to spurious SIGTRAPs.
         */
        if (child->thread.ubc_pc != 0) {
                ubc_usercnt -= 1;
                child->thread.ubc_pc = 0;
        }
}
/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
        user_disable_single_step(child);
}
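/*
 * regset accessors for the general-purpose register block: R0-R15 are
 * copied first, followed by the remaining pt_regs fields (PC, PR, SR, GBR,
 * MACH, MACL, TRA); anything beyond struct pt_regs is zero-filled on reads
 * and ignored on writes.
 */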
static int genregs_get(struct task_struct *target,
                       const struct user_regset *regset,
                       unsigned int pos, unsigned int count,
                       void *kbuf, void __user *ubuf)
{
        const struct pt_regs *regs = task_pt_regs(target);
        int ret;

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  regs->regs,
                                  0, 16 * sizeof(unsigned long));
        if (!ret)
                /* PC, PR, SR, GBR, MACH, MACL, TRA */
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &regs->pc,
                                          offsetof(struct pt_regs, pc),
                                          sizeof(struct pt_regs));
        if (!ret)
                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               sizeof(struct pt_regs), -1);

        return ret;
}
static int genregs_set(struct task_struct *target,
                       const struct user_regset *regset,
                       unsigned int pos, unsigned int count,
                       const void *kbuf, const void __user *ubuf)
{
        struct pt_regs *regs = task_pt_regs(target);
        int ret;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 regs->regs,
                                 0, 16 * sizeof(unsigned long));
        if (!ret && count > 0)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &regs->pc,
                                         offsetof(struct pt_regs, pc),
                                         sizeof(struct pt_regs));
        if (!ret)
                ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                                sizeof(struct pt_regs), -1);

        return ret;
}
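/*
 * FPU regset accessors: FPU state is allocated lazily via init_fpu(); the
 * hardware context is used when the CPU has an FPU, the software emulation
 * context otherwise.
 */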
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
               const struct user_regset *regset,
               unsigned int pos, unsigned int count,
               void *kbuf, void __user *ubuf)
{
        int ret;

        ret = init_fpu(target);
        if (ret)
                return ret;

        if ((boot_cpu_data.flags & CPU_HAS_FPU))
                return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                           &target->thread.fpu.hard, 0, -1);

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fpu.soft, 0, -1);
}
static int fpregs_set(struct task_struct *target,
                      const struct user_regset *regset,
                      unsigned int pos, unsigned int count,
                      const void *kbuf, const void __user *ubuf)
{
        int ret;

        ret = init_fpu(target);
        if (ret)
                return ret;

        set_stopped_child_used_math(target);

        if ((boot_cpu_data.flags & CPU_HAS_FPU))
                return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.fpu.hard, 0, -1);

        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fpu.soft, 0, -1);
}
static int fpregs_active(struct task_struct *target,
                         const struct user_regset *regset)
{
        return tsk_used_math(target) ? regset->n : 0;
}
#endif
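/*
 * DSP regset accessors: the DSP register block lives in thread.dsp_status
 * and is only reported as active while the DSP bit is set in the target's
 * SR.
 */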
#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
                       const struct user_regset *regset,
                       unsigned int pos, unsigned int count,
                       void *kbuf, void __user *ubuf)
{
        const struct pt_dspregs *regs =
                (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
        int ret;

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
                                  0, sizeof(struct pt_dspregs));
        if (!ret)
                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               sizeof(struct pt_dspregs), -1);

        return ret;
}
static int dspregs_set(struct task_struct *target,
                       const struct user_regset *regset,
                       unsigned int pos, unsigned int count,
                       const void *kbuf, const void __user *ubuf)
{
        struct pt_dspregs *regs =
                (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
        int ret;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
                                 0, sizeof(struct pt_dspregs));
        if (!ret)
                ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                                sizeof(struct pt_dspregs), -1);

        return ret;
}
static int dspregs_active(struct task_struct *target,
                          const struct user_regset *regset)
{
        struct pt_regs *regs = task_pt_regs(target);

        return regs->sr & SR_DSP ? regset->n : 0;
}
#endif
/*
 * These are our native regset flavours.
 */
enum sh_regset {
        REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
        REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
        REGSET_DSP,
#endif
};

static const struct user_regset sh_regsets[] = {
        /*
         * Format is:
         *      R0 --> R15
         *      PC, PR, SR, GBR, MACH, MACL, TRA
         */
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
                .n              = ELF_NGREG,
                .size           = sizeof(long),
                .align          = sizeof(long),
                .get            = genregs_get,
                .set            = genregs_set,
        },

#ifdef CONFIG_SH_FPU
        [REGSET_FPU] = {
                .core_note_type = NT_PRFPREG,
                .n              = sizeof(struct user_fpu_struct) / sizeof(long),
                .size           = sizeof(long),
                .align          = sizeof(long),
                .get            = fpregs_get,
                .set            = fpregs_set,
                .active         = fpregs_active,
        },
#endif

#ifdef CONFIG_SH_DSP
        [REGSET_DSP] = {
                .n              = sizeof(struct pt_dspregs) / sizeof(long),
                .size           = sizeof(long),
                .align          = sizeof(long),
                .get            = dspregs_get,
                .set            = dspregs_set,
                .active         = dspregs_active,
        },
#endif
};

static const struct user_regset_view user_sh_native_view = {
        .name           = "sh",
        .e_machine      = EM_SH,
        .regsets        = sh_regsets,
        .n              = ARRAY_SIZE(sh_regsets),
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
        return &user_sh_native_view;
}
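/*
 * Arch-specific ptrace requests: PEEKUSR/POKEUSR access the struct user
 * layout directly, the GET/SET*REGS requests are routed through the regsets
 * above, and anything unrecognised falls through to the generic
 * ptrace_request().
 */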
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
        struct user * dummy = NULL;
        unsigned long __user *datap = (unsigned long __user *)data;
        int ret;

        switch (request) {
        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                unsigned long tmp;

                ret = -EIO;
                if ((addr & 3) || addr < 0 ||
                    addr > sizeof(struct user) - 3)
                        break;

                if (addr < sizeof(struct pt_regs))
                        tmp = get_stack_long(child, addr);
                else if (addr >= (long) &dummy->fpu &&
                         addr < (long) &dummy->u_fpvalid) {
                        if (!tsk_used_math(child)) {
                                if (addr == (long)&dummy->fpu.fpscr)
                                        tmp = FPSCR_INIT;
                                else
                                        tmp = 0;
                        } else
                                tmp = ((long *)&child->thread.fpu)
                                        [(addr - (long)&dummy->fpu) >> 2];
                } else if (addr == (long) &dummy->u_fpvalid)
                        tmp = !!tsk_used_math(child);
                else
                        tmp = 0;
                ret = put_user(tmp, datap);
                break;
        }
        case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
                ret = -EIO;
                if ((addr & 3) || addr < 0 ||
                    addr > sizeof(struct user) - 3)
                        break;

                if (addr < sizeof(struct pt_regs))
                        ret = put_stack_long(child, addr, data);
                else if (addr >= (long) &dummy->fpu &&
                         addr < (long) &dummy->u_fpvalid) {
                        set_stopped_child_used_math(child);
                        ((long *)&child->thread.fpu)
                                [(addr - (long)&dummy->fpu) >> 2] = data;
                        ret = 0;
                } else if (addr == (long) &dummy->u_fpvalid) {
                        conditional_stopped_child_used_math(data, child);
                        ret = 0;
                }
                break;
        case PTRACE_GETREGS:
                return copy_regset_to_user(child, &user_sh_native_view,
                                           REGSET_GENERAL,
                                           0, sizeof(struct pt_regs),
                                           (void __user *)data);
        case PTRACE_SETREGS:
                return copy_regset_from_user(child, &user_sh_native_view,
                                             REGSET_GENERAL,
                                             0, sizeof(struct pt_regs),
                                             (const void __user *)data);
#ifdef CONFIG_SH_FPU
        case PTRACE_GETFPREGS:
                return copy_regset_to_user(child, &user_sh_native_view,
                                           REGSET_FPU,
                                           0, sizeof(struct user_fpu_struct),
                                           (void __user *)data);
        case PTRACE_SETFPREGS:
                return copy_regset_from_user(child, &user_sh_native_view,
                                             REGSET_FPU,
                                             0, sizeof(struct user_fpu_struct),
                                             (const void __user *)data);
#endif
#ifdef CONFIG_SH_DSP
        case PTRACE_GETDSPREGS:
                return copy_regset_to_user(child, &user_sh_native_view,
                                           REGSET_DSP,
                                           0, sizeof(struct pt_dspregs),
                                           (void __user *)data);
        case PTRACE_SETDSPREGS:
                return copy_regset_from_user(child, &user_sh_native_view,
                                             REGSET_DSP,
                                             0, sizeof(struct pt_dspregs),
                                             (const void __user *)data);
#endif
#ifdef CONFIG_BINFMT_ELF_FDPIC
        case PTRACE_GETFDPIC: {
                unsigned long tmp = 0;

                switch (addr) {
                case PTRACE_GETFDPIC_EXEC:
                        tmp = child->mm->context.exec_fdpic_loadmap;
                        break;
                case PTRACE_GETFDPIC_INTERP:
                        tmp = child->mm->context.interp_fdpic_loadmap;
                        break;
                default:
                        break;
                }

                ret = 0;
                if (put_user(tmp, datap)) {
                        ret = -EFAULT;
                        break;
                }
                break;
        }
#endif
        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }

        return ret;
}
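/*
 * Audit architecture token for this CPU: the EM_SH machine value, with the
 * little-endian flag folded in on little-endian builds.
 */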
static inline int audit_arch(void)
{
        int arch = EM_SH;

#ifdef CONFIG_CPU_LITTLE_ENDIAN
        arch |= __AUDIT_ARCH_LE;
#endif

        return arch;
}
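/*
 * Called on syscall entry when syscall tracing is in effect: runs the
 * seccomp check, reports the entry to the tracer, and logs the call for
 * audit.  Returns the syscall number to execute, or -1 if the tracer
 * cancelled the call.
 */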
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
        long ret = 0;

        secure_computing(regs->regs[0]);

        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
                /*
                 * Tracing decided this syscall should not happen.
                 * We'll return a bogus call number to get an ENOSYS
                 * error, but leave the original number in regs->regs[0].
                 */
                ret = -1L;

        if (unlikely(current->audit_context))
                audit_syscall_entry(audit_arch(), regs->regs[3],
                                    regs->regs[4], regs->regs[5],
                                    regs->regs[6], regs->regs[7]);

        return ret ?: regs->regs[0];
}
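/*
 * Called on syscall exit when tracing or single-stepping: logs the result
 * for audit and reports the exit (and any pending singlestep trap) to the
 * tracer.
 */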
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
        int step;

        if (unlikely(current->audit_context))
                audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
                                   regs->regs[0]);

        step = test_thread_flag(TIF_SINGLESTEP);
        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, step);
}