/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
10 #include <linux/smp.h>
11 #include <linux/errno.h>
12 #include <linux/ptrace.h>
13 #include <linux/user.h>
14 #include <linux/security.h>
15 #include <linux/audit.h>
16 #include <linux/seccomp.h>
17 #include <linux/signal.h>
19 #include <asm/uaccess.h>
20 #include <asm/pgtable.h>
21 #include <asm/system.h>
22 #include <asm/processor.h>
24 #include <asm/debugreg.h>
27 #include <asm/prctl.h>
28 #include <asm/proto.h>
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */
/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 * Only these EFLAGS bits may be modified through ptrace; the rest are
 * kernel-controlled (IF, IOPL, VM, ...).
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))
46 * Determines whether a value may be installed in a segment register.
48 static inline bool invalid_selector(u16 value)
50 return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
55 #define FLAG_MASK FLAG_MASK_32
57 static long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
59 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
63 return ®s->bx + regno;
66 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
69 * Returning the value truncates it to 16 bits.
72 if (offset != offsetof(struct user_regs_struct, gs))
73 retval = *pt_regs_access(task_pt_regs(task), offset);
75 retval = task->thread.gs;
77 savesegment(gs, retval);
82 static int set_segment_reg(struct task_struct *task,
83 unsigned long offset, u16 value)
86 * The value argument was already truncated to 16 bits.
88 if (invalid_selector(value))
91 if (offset != offsetof(struct user_regs_struct, gs))
92 *pt_regs_access(task_pt_regs(task), offset) = value;
94 task->thread.gs = value;
97 * The user-mode %gs is not affected by
98 * kernel entry, so we must update the CPU.
100 loadsegment(gs, value);
106 static unsigned long debugreg_addr_limit(struct task_struct *task)
108 return TASK_SIZE - 3;
111 #else /* CONFIG_X86_64 */
113 #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT)
115 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
117 BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
118 return ®s->r15 + (offset / sizeof(regs->r15));
121 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
124 * Returning the value truncates it to 16 bits.
129 case offsetof(struct user_regs_struct, fs):
130 if (task == current) {
131 /* Older gas can't assemble movq %?s,%r?? */
132 asm("movl %%fs,%0" : "=r" (seg));
135 return task->thread.fsindex;
136 case offsetof(struct user_regs_struct, gs):
137 if (task == current) {
138 asm("movl %%gs,%0" : "=r" (seg));
141 return task->thread.gsindex;
142 case offsetof(struct user_regs_struct, ds):
143 if (task == current) {
144 asm("movl %%ds,%0" : "=r" (seg));
147 return task->thread.ds;
148 case offsetof(struct user_regs_struct, es):
149 if (task == current) {
150 asm("movl %%es,%0" : "=r" (seg));
153 return task->thread.es;
155 case offsetof(struct user_regs_struct, cs):
156 case offsetof(struct user_regs_struct, ss):
159 return *pt_regs_access(task_pt_regs(task), offset);
162 static int set_segment_reg(struct task_struct *task,
163 unsigned long offset, u16 value)
166 * The value argument was already truncated to 16 bits.
168 if (invalid_selector(value))
172 case offsetof(struct user_regs_struct,fs):
174 * If this is setting fs as for normal 64-bit use but
175 * setting fs_base has implicitly changed it, leave it.
177 if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
178 task->thread.fs != 0) ||
179 (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
180 task->thread.fs == 0))
182 task->thread.fsindex = value;
184 loadsegment(fs, task->thread.fsindex);
186 case offsetof(struct user_regs_struct,gs):
188 * If this is setting gs as for normal 64-bit use but
189 * setting gs_base has implicitly changed it, leave it.
191 if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
192 task->thread.gs != 0) ||
193 (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
194 task->thread.gs == 0))
196 task->thread.gsindex = value;
198 load_gs_index(task->thread.gsindex);
200 case offsetof(struct user_regs_struct,ds):
201 task->thread.ds = value;
203 loadsegment(ds, task->thread.ds);
205 case offsetof(struct user_regs_struct,es):
206 task->thread.es = value;
208 loadsegment(es, task->thread.es);
212 * Can't actually change these in 64-bit mode.
214 case offsetof(struct user_regs_struct,cs):
215 #ifdef CONFIG_IA32_EMULATION
216 if (test_tsk_thread_flag(task, TIF_IA32))
217 task_pt_regs(task)->cs = value;
220 case offsetof(struct user_regs_struct,ss):
221 #ifdef CONFIG_IA32_EMULATION
222 if (test_tsk_thread_flag(task, TIF_IA32))
223 task_pt_regs(task)->ss = value;
231 static unsigned long debugreg_addr_limit(struct task_struct *task)
233 #ifdef CONFIG_IA32_EMULATION
234 if (test_tsk_thread_flag(task, TIF_IA32))
235 return IA32_PAGE_OFFSET - 3;
237 return TASK_SIZE64 - 7;
240 #endif /* CONFIG_X86_32 */
242 static unsigned long get_flags(struct task_struct *task)
244 unsigned long retval = task_pt_regs(task)->flags;
247 * If the debugger set TF, hide it from the readout.
249 if (test_tsk_thread_flag(task, TIF_FORCED_TF))
250 retval &= ~X86_EFLAGS_TF;
255 static int set_flags(struct task_struct *task, unsigned long value)
257 struct pt_regs *regs = task_pt_regs(task);
260 * If the user value contains TF, mark that
261 * it was not "us" (the debugger) that set it.
262 * If not, make sure it stays set if we had.
264 if (value & X86_EFLAGS_TF)
265 clear_tsk_thread_flag(task, TIF_FORCED_TF);
266 else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
267 value |= X86_EFLAGS_TF;
269 regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
274 static int putreg(struct task_struct *child,
275 unsigned long offset, unsigned long value)
278 case offsetof(struct user_regs_struct, cs):
279 case offsetof(struct user_regs_struct, ds):
280 case offsetof(struct user_regs_struct, es):
281 case offsetof(struct user_regs_struct, fs):
282 case offsetof(struct user_regs_struct, gs):
283 case offsetof(struct user_regs_struct, ss):
284 return set_segment_reg(child, offset, value);
286 case offsetof(struct user_regs_struct, flags):
287 return set_flags(child, value);
290 case offsetof(struct user_regs_struct,fs_base):
291 if (value >= TASK_SIZE_OF(child))
294 * When changing the segment base, use do_arch_prctl
295 * to set either thread.fs or thread.fsindex and the
296 * corresponding GDT slot.
298 if (child->thread.fs != value)
299 return do_arch_prctl(child, ARCH_SET_FS, value);
301 case offsetof(struct user_regs_struct,gs_base):
303 * Exactly the same here as the %fs handling above.
305 if (value >= TASK_SIZE_OF(child))
307 if (child->thread.gs != value)
308 return do_arch_prctl(child, ARCH_SET_GS, value);
313 *pt_regs_access(task_pt_regs(child), offset) = value;
317 static unsigned long getreg(struct task_struct *task, unsigned long offset)
320 case offsetof(struct user_regs_struct, cs):
321 case offsetof(struct user_regs_struct, ds):
322 case offsetof(struct user_regs_struct, es):
323 case offsetof(struct user_regs_struct, fs):
324 case offsetof(struct user_regs_struct, gs):
325 case offsetof(struct user_regs_struct, ss):
326 return get_segment_reg(task, offset);
328 case offsetof(struct user_regs_struct, flags):
329 return get_flags(task);
332 case offsetof(struct user_regs_struct, fs_base): {
334 * do_arch_prctl may have used a GDT slot instead of
335 * the MSR. To userland, it appears the same either
336 * way, except the %fs segment selector might not be 0.
338 unsigned int seg = task->thread.fsindex;
339 if (task->thread.fs != 0)
340 return task->thread.fs;
342 asm("movl %%fs,%0" : "=r" (seg));
343 if (seg != FS_TLS_SEL)
345 return get_desc_base(&task->thread.tls_array[FS_TLS]);
347 case offsetof(struct user_regs_struct, gs_base): {
349 * Exactly the same here as the %fs handling above.
351 unsigned int seg = task->thread.gsindex;
352 if (task->thread.gs != 0)
353 return task->thread.gs;
355 asm("movl %%gs,%0" : "=r" (seg));
356 if (seg != GS_TLS_SEL)
358 return get_desc_base(&task->thread.tls_array[GS_TLS]);
363 return *pt_regs_access(task_pt_regs(task), offset);
367 * This function is trivial and will be inlined by the compiler.
368 * Having it separates the implementation details of debug
369 * registers from the interface details of ptrace.
371 static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
374 case 0: return child->thread.debugreg0;
375 case 1: return child->thread.debugreg1;
376 case 2: return child->thread.debugreg2;
377 case 3: return child->thread.debugreg3;
378 case 6: return child->thread.debugreg6;
379 case 7: return child->thread.debugreg7;
384 static int ptrace_set_debugreg(struct task_struct *child,
385 int n, unsigned long data)
389 if (unlikely(n == 4 || n == 5))
392 if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
396 case 0: child->thread.debugreg0 = data; break;
397 case 1: child->thread.debugreg1 = data; break;
398 case 2: child->thread.debugreg2 = data; break;
399 case 3: child->thread.debugreg3 = data; break;
402 if ((data & ~0xffffffffUL) != 0)
404 child->thread.debugreg6 = data;
409 * Sanity-check data. Take one half-byte at once with
410 * check = (val >> (16 + 4*i)) & 0xf. It contains the
411 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
412 * 2 and 3 are LENi. Given a list of invalid values,
413 * we do mask |= 1 << invalid_value, so that
414 * (mask >> check) & 1 is a correct test for invalid
417 * R/Wi contains the type of the breakpoint /
418 * watchpoint, LENi contains the length of the watched
419 * data in the watchpoint case.
421 * The invalid values are:
422 * - LENi == 0x10 (undefined), so mask |= 0x0f00. [32-bit]
423 * - R/Wi == 0x10 (break on I/O reads or writes), so
425 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
428 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
430 * See the Intel Manual "System Programming Guide",
433 * Note that LENi == 0x10 is defined on x86_64 in long
434 * mode (i.e. even for 32-bit userspace software, but
435 * 64-bit kernel), so the x86_64 mask value is 0x5454.
436 * See the AMD manual no. 24593 (AMD64 System Programming)
439 #define DR7_MASK 0x5f54
441 #define DR7_MASK 0x5554
443 data &= ~DR_CONTROL_RESERVED;
444 for (i = 0; i < 4; i++)
445 if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
447 child->thread.debugreg7 = data;
449 set_tsk_thread_flag(child, TIF_DEBUG);
451 clear_tsk_thread_flag(child, TIF_DEBUG);
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}
471 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
474 unsigned long __user *datap = (unsigned long __user *)data;
477 /* when I and D space are separate, these will need to be fixed. */
478 case PTRACE_PEEKTEXT: /* read word at location addr. */
479 case PTRACE_PEEKDATA:
480 ret = generic_ptrace_peekdata(child, addr, data);
483 /* read the word at location addr in the USER area. */
484 case PTRACE_PEEKUSR: {
488 if ((addr & (sizeof(data) - 1)) || addr < 0 ||
489 addr >= sizeof(struct user))
492 tmp = 0; /* Default return condition */
493 if (addr < sizeof(struct user_regs_struct))
494 tmp = getreg(child, addr);
495 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
496 addr <= offsetof(struct user, u_debugreg[7])) {
497 addr -= offsetof(struct user, u_debugreg[0]);
498 tmp = ptrace_get_debugreg(child, addr / sizeof(data));
500 ret = put_user(tmp, datap);
504 /* when I and D space are separate, this will have to be fixed. */
505 case PTRACE_POKETEXT: /* write the word at location addr. */
506 case PTRACE_POKEDATA:
507 ret = generic_ptrace_pokedata(child, addr, data);
510 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
512 if ((addr & (sizeof(data) - 1)) || addr < 0 ||
513 addr >= sizeof(struct user))
516 if (addr < sizeof(struct user_regs_struct))
517 ret = putreg(child, addr, data);
518 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
519 addr <= offsetof(struct user, u_debugreg[7])) {
520 addr -= offsetof(struct user, u_debugreg[0]);
521 ret = ptrace_set_debugreg(child,
522 addr / sizeof(data), data);
526 case PTRACE_GETREGS: { /* Get all gp regs from the child. */
527 if (!access_ok(VERIFY_WRITE, datap, sizeof(struct user_regs_struct))) {
531 for (i = 0; i < sizeof(struct user_regs_struct); i += sizeof(long)) {
532 __put_user(getreg(child, i), datap);
539 case PTRACE_SETREGS: { /* Set all gp regs in the child. */
541 if (!access_ok(VERIFY_READ, datap, sizeof(struct user_regs_struct))) {
545 for (i = 0; i < sizeof(struct user_regs_struct); i += sizeof(long)) {
546 __get_user(tmp, datap);
547 putreg(child, i, tmp);
554 case PTRACE_GETFPREGS: { /* Get the child FPU state. */
555 if (!access_ok(VERIFY_WRITE, datap,
556 sizeof(struct user_i387_struct))) {
561 if (!tsk_used_math(child))
563 get_fpregs((struct user_i387_struct __user *)data, child);
567 case PTRACE_SETFPREGS: { /* Set the child FPU state. */
568 if (!access_ok(VERIFY_READ, datap,
569 sizeof(struct user_i387_struct))) {
573 set_stopped_child_used_math(child);
574 set_fpregs(child, (struct user_i387_struct __user *)data);
580 case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */
581 if (!access_ok(VERIFY_WRITE, datap,
582 sizeof(struct user_fxsr_struct))) {
586 if (!tsk_used_math(child))
588 ret = get_fpxregs((struct user_fxsr_struct __user *)data, child);
592 case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */
593 if (!access_ok(VERIFY_READ, datap,
594 sizeof(struct user_fxsr_struct))) {
598 set_stopped_child_used_math(child);
599 ret = set_fpxregs(child, (struct user_fxsr_struct __user *)data);
604 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
605 case PTRACE_GET_THREAD_AREA:
608 ret = do_get_thread_area(child, addr,
609 (struct user_desc __user *) data);
612 case PTRACE_SET_THREAD_AREA:
615 ret = do_set_thread_area(child, addr,
616 (struct user_desc __user *) data, 0);
621 /* normal 64bit interface to access TLS data.
622 Works just like arch_prctl, except that the arguments
624 case PTRACE_ARCH_PRCTL:
625 ret = do_arch_prctl(child, data, addr);
630 ret = ptrace_request(child, request, addr, data);
637 #ifdef CONFIG_IA32_EMULATION
639 #include <linux/compat.h>
640 #include <linux/syscalls.h>
641 #include <asm/ia32.h>
642 #include <asm/fpu32.h>
643 #include <asm/user32.h>
/*
 * R32(l, q): generate the switch case that stores a 32-bit user32
 * register named l into the native pt_regs field q.
 */
#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

/*
 * SEG32(rs): segment registers go through set_segment_reg() so the
 * selector is validated and the live CPU state is kept in sync.
 */
#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value)
656 static int putreg32(struct task_struct *child, unsigned regno, u32 value)
658 struct pt_regs *regs = task_pt_regs(child);
676 R32(orig_eax, orig_ax);
680 case offsetof(struct user32, regs.eflags):
681 return set_flags(child, value);
683 case offsetof(struct user32, u_debugreg[0]) ...
684 offsetof(struct user32, u_debugreg[7]):
685 regno -= offsetof(struct user32, u_debugreg[0]);
686 return ptrace_set_debugreg(child, regno / 4, value);
689 if (regno > sizeof(struct user32) || (regno & 3))
693 * Other dummy fields in the virtual user structure
#undef R32
#undef SEG32

/*
 * R32(l, q): generate the switch case that reads the native pt_regs
 * field q back as the 32-bit user32 register l.
 */
#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

/*
 * SEG32(rs): segment selectors are fetched via get_segment_reg()
 * (which reads the live CPU state when the tracee is current).
 */
#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break
714 static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
716 struct pt_regs *regs = task_pt_regs(child);
734 R32(orig_eax, orig_ax);
738 case offsetof(struct user32, regs.eflags):
739 *val = get_flags(child);
742 case offsetof(struct user32, u_debugreg[0]) ...
743 offsetof(struct user32, u_debugreg[7]):
744 regno -= offsetof(struct user32, u_debugreg[0]);
745 *val = ptrace_get_debugreg(child, regno / 4);
749 if (regno > sizeof(struct user32) || (regno & 3))
753 * Other dummy fields in the virtual user structure
765 static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
767 siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
768 compat_siginfo_t __user *si32 = compat_ptr(data);
772 if (request == PTRACE_SETSIGINFO) {
773 memset(&ssi, 0, sizeof(siginfo_t));
774 ret = copy_siginfo_from_user32(&ssi, si32);
777 if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
780 ret = sys_ptrace(request, pid, addr, (unsigned long)si);
783 if (request == PTRACE_GETSIGINFO) {
784 if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
786 ret = copy_siginfo_to_user32(si32, &ssi);
791 asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
793 struct task_struct *child;
794 struct pt_regs *childregs;
795 void __user *datap = compat_ptr(data);
804 case PTRACE_SINGLESTEP:
805 case PTRACE_SINGLEBLOCK:
808 case PTRACE_OLDSETOPTIONS:
809 case PTRACE_SETOPTIONS:
810 case PTRACE_SET_THREAD_AREA:
811 case PTRACE_GET_THREAD_AREA:
812 return sys_ptrace(request, pid, addr, data);
817 case PTRACE_PEEKTEXT:
818 case PTRACE_PEEKDATA:
819 case PTRACE_POKEDATA:
820 case PTRACE_POKETEXT:
825 case PTRACE_SETFPREGS:
826 case PTRACE_GETFPREGS:
827 case PTRACE_SETFPXREGS:
828 case PTRACE_GETFPXREGS:
829 case PTRACE_GETEVENTMSG:
832 case PTRACE_SETSIGINFO:
833 case PTRACE_GETSIGINFO:
834 return ptrace32_siginfo(request, pid, addr, data);
837 child = ptrace_get_task_struct(pid);
839 return PTR_ERR(child);
841 ret = ptrace_check_attach(child, request == PTRACE_KILL);
845 childregs = task_pt_regs(child);
848 case PTRACE_PEEKDATA:
849 case PTRACE_PEEKTEXT:
851 if (access_process_vm(child, addr, &val, sizeof(u32), 0) !=
855 ret = put_user(val, (unsigned int __user *)datap);
858 case PTRACE_POKEDATA:
859 case PTRACE_POKETEXT:
861 if (access_process_vm(child, addr, &data, sizeof(u32), 1) !=
867 ret = getreg32(child, addr, &val);
869 ret = put_user(val, (__u32 __user *)datap);
873 ret = putreg32(child, addr, data);
876 case PTRACE_GETREGS: { /* Get all gp regs from the child. */
879 if (!access_ok(VERIFY_WRITE, datap, 16*4)) {
884 for (i = 0; i < sizeof(struct user_regs_struct32); i += sizeof(__u32)) {
885 getreg32(child, i, &val);
886 ret |= __put_user(val, (u32 __user *)datap);
887 datap += sizeof(u32);
892 case PTRACE_SETREGS: { /* Set all gp regs in the child. */
896 if (!access_ok(VERIFY_READ, datap, 16*4)) {
901 for (i = 0; i < sizeof(struct user_regs_struct32); i += sizeof(u32)) {
902 ret |= __get_user(tmp, (u32 __user *)datap);
903 putreg32(child, i, tmp);
904 datap += sizeof(u32);
909 case PTRACE_GETFPREGS:
911 if (!access_ok(VERIFY_READ, compat_ptr(data),
912 sizeof(struct user_i387_struct)))
914 save_i387_ia32(child, datap, childregs, 1);
918 case PTRACE_SETFPREGS:
920 if (!access_ok(VERIFY_WRITE, datap,
921 sizeof(struct user_i387_struct)))
924 /* don't check EFAULT to be bug-to-bug compatible to i386 */
925 restore_i387_ia32(child, datap, 1);
928 case PTRACE_GETFPXREGS: {
929 struct user32_fxsr_struct __user *u = datap;
933 if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
936 if (__copy_to_user(u, &child->thread.i387.fxsave, sizeof(*u)))
938 ret = __put_user(childregs->cs, &u->fcs);
939 ret |= __put_user(child->thread.ds, &u->fos);
942 case PTRACE_SETFPXREGS: {
943 struct user32_fxsr_struct __user *u = datap;
947 if (!access_ok(VERIFY_READ, u, sizeof(*u)))
950 * no checking to be bug-to-bug compatible with i386.
951 * but silence warning
953 if (__copy_from_user(&child->thread.i387.fxsave, u, sizeof(*u)))
955 set_stopped_child_used_math(child);
956 child->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
961 case PTRACE_GETEVENTMSG:
962 ret = put_user(child->ptrace_message,
963 (unsigned int __user *)compat_ptr(data));
971 put_task_struct(child);
975 #endif /* CONFIG_IA32_EMULATION */
979 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
983 tsk->thread.trap_no = 1;
984 tsk->thread.error_code = error_code;
986 memset(&info, 0, sizeof(info));
987 info.si_signo = SIGTRAP;
988 info.si_code = TRAP_BRKPT;
991 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
993 /* Send us the fake SIGTRAP */
994 force_sig_info(SIGTRAP, &info, tsk);
997 /* notification of system call entry/exit
998 * - triggered by current->work.syscall_trace
1000 __attribute__((regparm(3)))
1001 int do_syscall_trace(struct pt_regs *regs, int entryexit)
1003 int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
1005 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for syscall
1008 int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
1011 /* do the secure computing check first */
1013 secure_computing(regs->orig_ax);
1015 if (unlikely(current->audit_context)) {
1017 audit_syscall_exit(AUDITSC_RESULT(regs->ax),
1019 /* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
1020 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
1021 * not used, entry.S will call us only on syscall exit, not
1022 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
1023 * calling send_sigtrap() on syscall entry.
1025 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
1026 * is_singlestep is false, despite his name, so we will still do
1027 * the correct thing.
1029 else if (is_singlestep)
1033 if (!(current->ptrace & PT_PTRACED))
1036 /* If a process stops on the 1st tracepoint with SYSCALL_TRACE
1037 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
1038 * here. We have to check this and return */
1039 if (is_sysemu && entryexit)
1042 /* Fake a debug trap */
1044 send_sigtrap(current, regs, 0);
1046 if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
1049 /* the 0x80 provides a way for the tracing parent to distinguish
1050 between a syscall stop and SIGTRAP delivery */
1051 /* Note that the debugger could change the result of test_thread_flag!*/
1052 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80:0));
1055 * this isn't the same as continuing with a signal, but it will do
1056 * for normal use. strace only continues with a signal if the
1057 * stopping signal is not SIGTRAP. -brl
1059 if (current->exit_code) {
1060 send_sig(current->exit_code, current, 1);
1061 current->exit_code = 0;
1065 if (unlikely(current->audit_context) && !entryexit)
1066 audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax,
1067 regs->bx, regs->cx, regs->dx, regs->si);
1071 regs->orig_ax = -1; /* force skip of syscall restarting */
1072 if (unlikely(current->audit_context))
1073 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1077 #else /* CONFIG_X86_64 */
1079 static void syscall_trace(struct pt_regs *regs)
1083 printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
1085 regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
1086 current_thread_info()->flags, current->ptrace);
1089 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
1092 * this isn't the same as continuing with a signal, but it will do
1093 * for normal use. strace only continues with a signal if the
1094 * stopping signal is not SIGTRAP. -brl
1096 if (current->exit_code) {
1097 send_sig(current->exit_code, current, 1);
1098 current->exit_code = 0;
1102 asmlinkage void syscall_trace_enter(struct pt_regs *regs)
1104 /* do the secure computing check first */
1105 secure_computing(regs->orig_ax);
1107 if (test_thread_flag(TIF_SYSCALL_TRACE)
1108 && (current->ptrace & PT_PTRACED))
1109 syscall_trace(regs);
1111 if (unlikely(current->audit_context)) {
1112 if (test_thread_flag(TIF_IA32)) {
1113 audit_syscall_entry(AUDIT_ARCH_I386,
1116 regs->dx, regs->si);
1118 audit_syscall_entry(AUDIT_ARCH_X86_64,
1121 regs->dx, regs->r10);
1126 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1128 if (unlikely(current->audit_context))
1129 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1131 if ((test_thread_flag(TIF_SYSCALL_TRACE)
1132 || test_thread_flag(TIF_SINGLESTEP))
1133 && (current->ptrace & PT_PTRACED))
1134 syscall_trace(regs);
1137 #endif /* CONFIG_X86_32 */