1 /* By Ross Biro 1/23/92 */
3 * Pentium III FXSR, SSE support
4 * Gareth Hughes <gareth@valinux.com>, May 2000
7 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
13 #include <linux/smp.h>
14 #include <linux/errno.h>
15 #include <linux/ptrace.h>
16 #include <linux/regset.h>
17 #include <linux/tracehook.h>
18 #include <linux/user.h>
19 #include <linux/elf.h>
20 #include <linux/security.h>
21 #include <linux/audit.h>
22 #include <linux/seccomp.h>
23 #include <linux/signal.h>
24 #include <linux/workqueue.h>
26 #include <asm/uaccess.h>
27 #include <asm/pgtable.h>
28 #include <asm/system.h>
29 #include <asm/processor.h>
31 #include <asm/debugreg.h>
34 #include <asm/prctl.h>
35 #include <asm/proto.h>
38 #include <trace/syscall.h>
46 REGSET_IOPERM64 = REGSET_XFP,
52 * does not yet catch signals sent when the child dies;
53 * that should be handled in exit.c or in signal.c.
57 * Determines which flags the user has access to [1 = access, 0 = no access].
59 #define FLAG_MASK_32 ((unsigned long) \
60 (X86_EFLAGS_CF | X86_EFLAGS_PF | \
61 X86_EFLAGS_AF | X86_EFLAGS_ZF | \
62 X86_EFLAGS_SF | X86_EFLAGS_TF | \
63 X86_EFLAGS_DF | X86_EFLAGS_OF | \
64 X86_EFLAGS_RF | X86_EFLAGS_AC))
67 * Determines whether a value may be installed in a segment register.
69 static inline bool invalid_selector(u16 value)
71 return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
76 #define FLAG_MASK FLAG_MASK_32
78 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
80 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
81 return &regs->bx + (regno >> 2);
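/*
 * Editor's note (not in the original source): the regno handed to
 * pt_regs_access() is a byte offset into struct user_regs_struct, so
 * e.g. offset 8 on 32-bit selects the third long-sized slot, i.e.
 * &regs->bx + 2 (edx), relying on pt_regs and user_regs_struct sharing
 * the same layout for the general registers.
 */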
84 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
87 * Returning the value truncates it to 16 bits.
90 if (offset != offsetof(struct user_regs_struct, gs))
91 retval = *pt_regs_access(task_pt_regs(task), offset);
94 retval = get_user_gs(task_pt_regs(task));
96 retval = task_user_gs(task);
101 static int set_segment_reg(struct task_struct *task,
102 unsigned long offset, u16 value)
105 * The value argument was already truncated to 16 bits.
107 if (invalid_selector(value))
111 * For %cs and %ss we cannot permit a null selector.
112 * We can permit a bogus selector as long as it has USER_RPL.
113 * Null selectors are fine for other segment registers, but
114 * we will never get back to user mode with invalid %cs or %ss
115 * and will take the trap in iret instead. Much code relies
116 * on user_mode() to distinguish a user trap frame (which can
117 * safely use invalid selectors) from a kernel trap frame.
120 case offsetof(struct user_regs_struct, cs):
121 case offsetof(struct user_regs_struct, ss):
122 if (unlikely(value == 0))
126 *pt_regs_access(task_pt_regs(task), offset) = value;
129 case offsetof(struct user_regs_struct, gs):
131 set_user_gs(task_pt_regs(task), value);
133 task_user_gs(task) = value;
139 static unsigned long debugreg_addr_limit(struct task_struct *task)
141 return TASK_SIZE - 3;
144 #else /* CONFIG_X86_64 */
146 #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT)
148 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
150 BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
151 return &regs->r15 + (offset / sizeof(regs->r15));
154 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
157 * Returning the value truncates it to 16 bits.
162 case offsetof(struct user_regs_struct, fs):
163 if (task == current) {
164 /* Older gas can't assemble movq %?s,%r?? */
165 asm("movl %%fs,%0" : "=r" (seg));
168 return task->thread.fsindex;
169 case offsetof(struct user_regs_struct, gs):
170 if (task == current) {
171 asm("movl %%gs,%0" : "=r" (seg));
174 return task->thread.gsindex;
175 case offsetof(struct user_regs_struct, ds):
176 if (task == current) {
177 asm("movl %%ds,%0" : "=r" (seg));
180 return task->thread.ds;
181 case offsetof(struct user_regs_struct, es):
182 if (task == current) {
183 asm("movl %%es,%0" : "=r" (seg));
186 return task->thread.es;
188 case offsetof(struct user_regs_struct, cs):
189 case offsetof(struct user_regs_struct, ss):
192 return *pt_regs_access(task_pt_regs(task), offset);
195 static int set_segment_reg(struct task_struct *task,
196 unsigned long offset, u16 value)
199 * The value argument was already truncated to 16 bits.
201 if (invalid_selector(value))
205 case offsetof(struct user_regs_struct,fs):
207 * If this is setting fs as for normal 64-bit use but
208 * setting fs_base has implicitly changed it, leave it.
210 if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
211 task->thread.fs != 0) ||
212 (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
213 task->thread.fs == 0))
215 task->thread.fsindex = value;
217 loadsegment(fs, task->thread.fsindex);
219 case offsetof(struct user_regs_struct,gs):
221 * If this is setting gs as for normal 64-bit use but
222 * setting gs_base has implicitly changed it, leave it.
224 if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
225 task->thread.gs != 0) ||
226 (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
227 task->thread.gs == 0))
229 task->thread.gsindex = value;
231 load_gs_index(task->thread.gsindex);
233 case offsetof(struct user_regs_struct,ds):
234 task->thread.ds = value;
236 loadsegment(ds, task->thread.ds);
238 case offsetof(struct user_regs_struct,es):
239 task->thread.es = value;
241 loadsegment(es, task->thread.es);
245 * Can't actually change these in 64-bit mode.
247 case offsetof(struct user_regs_struct,cs):
248 if (unlikely(value == 0))
250 #ifdef CONFIG_IA32_EMULATION
251 if (test_tsk_thread_flag(task, TIF_IA32))
252 task_pt_regs(task)->cs = value;
255 case offsetof(struct user_regs_struct,ss):
256 if (unlikely(value == 0))
258 #ifdef CONFIG_IA32_EMULATION
259 if (test_tsk_thread_flag(task, TIF_IA32))
260 task_pt_regs(task)->ss = value;
268 static unsigned long debugreg_addr_limit(struct task_struct *task)
270 #ifdef CONFIG_IA32_EMULATION
271 if (test_tsk_thread_flag(task, TIF_IA32))
272 return IA32_PAGE_OFFSET - 3;
274 return TASK_SIZE_MAX - 7;
277 #endif /* CONFIG_X86_32 */
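/*
 * Editor's note (not in the original source): the -3 / -7 slack in
 * debugreg_addr_limit() presumably leaves room for the largest
 * watchpoint (4 bytes on 32-bit, 8 bytes in long mode), so an address
 * that passes the limit check cannot watch bytes beyond the user
 * address range.
 */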
279 static unsigned long get_flags(struct task_struct *task)
281 unsigned long retval = task_pt_regs(task)->flags;
284 * If the debugger set TF, hide it from the readout.
286 if (test_tsk_thread_flag(task, TIF_FORCED_TF))
287 retval &= ~X86_EFLAGS_TF;
292 static int set_flags(struct task_struct *task, unsigned long value)
294 struct pt_regs *regs = task_pt_regs(task);
297 * If the user value contains TF, mark that
298 * it was not "us" (the debugger) that set it.
299 * If not, make sure it stays set if we had.
301 if (value & X86_EFLAGS_TF)
302 clear_tsk_thread_flag(task, TIF_FORCED_TF);
303 else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
304 value |= X86_EFLAGS_TF;
306 regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
311 static int putreg(struct task_struct *child,
312 unsigned long offset, unsigned long value)
315 case offsetof(struct user_regs_struct, cs):
316 case offsetof(struct user_regs_struct, ds):
317 case offsetof(struct user_regs_struct, es):
318 case offsetof(struct user_regs_struct, fs):
319 case offsetof(struct user_regs_struct, gs):
320 case offsetof(struct user_regs_struct, ss):
321 return set_segment_reg(child, offset, value);
323 case offsetof(struct user_regs_struct, flags):
324 return set_flags(child, value);
328 * Orig_ax is really just a flag with small positive and
329 * negative values, so make sure to always sign-extend it
330 * from 32 bits so that it works correctly regardless of
331 * whether we come from a 32-bit environment or not.
333 case offsetof(struct user_regs_struct, orig_ax):
334 value = (long) (s32) value;
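/*
 * Editor's note (not in the original source): e.g. a 32-bit debugger
 * storing 0xffffffff here must yield orig_ax == -1 (the "no syscall
 * to restart" sentinel); (long)(s32)0xffffffff == -1L, whereas plain
 * zero-extension would give 0xffffffffL and defeat (long)orig_ax < 0
 * tests.
 */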
337 case offsetof(struct user_regs_struct,fs_base):
338 if (value >= TASK_SIZE_OF(child))
341 * When changing the segment base, use do_arch_prctl
342 * to set either thread.fs or thread.fsindex and the
343 * corresponding GDT slot.
345 if (child->thread.fs != value)
346 return do_arch_prctl(child, ARCH_SET_FS, value);
348 case offsetof(struct user_regs_struct,gs_base):
350 * Exactly the same here as the %fs handling above.
352 if (value >= TASK_SIZE_OF(child))
354 if (child->thread.gs != value)
355 return do_arch_prctl(child, ARCH_SET_GS, value);
360 *pt_regs_access(task_pt_regs(child), offset) = value;
364 static unsigned long getreg(struct task_struct *task, unsigned long offset)
367 case offsetof(struct user_regs_struct, cs):
368 case offsetof(struct user_regs_struct, ds):
369 case offsetof(struct user_regs_struct, es):
370 case offsetof(struct user_regs_struct, fs):
371 case offsetof(struct user_regs_struct, gs):
372 case offsetof(struct user_regs_struct, ss):
373 return get_segment_reg(task, offset);
375 case offsetof(struct user_regs_struct, flags):
376 return get_flags(task);
379 case offsetof(struct user_regs_struct, fs_base): {
381 * do_arch_prctl may have used a GDT slot instead of
382 * the MSR. To userland, it appears the same either
383 * way, except the %fs segment selector might not be 0.
385 unsigned int seg = task->thread.fsindex;
386 if (task->thread.fs != 0)
387 return task->thread.fs;
389 asm("movl %%fs,%0" : "=r" (seg));
390 if (seg != FS_TLS_SEL)
392 return get_desc_base(&task->thread.tls_array[FS_TLS]);
394 case offsetof(struct user_regs_struct, gs_base): {
396 * Exactly the same here as the %fs handling above.
398 unsigned int seg = task->thread.gsindex;
399 if (task->thread.gs != 0)
400 return task->thread.gs;
402 asm("movl %%gs,%0" : "=r" (seg));
403 if (seg != GS_TLS_SEL)
405 return get_desc_base(&task->thread.tls_array[GS_TLS]);
410 return *pt_regs_access(task_pt_regs(task), offset);
413 static int genregs_get(struct task_struct *target,
414 const struct user_regset *regset,
415 unsigned int pos, unsigned int count,
416 void *kbuf, void __user *ubuf)
419 unsigned long *k = kbuf;
421 *k++ = getreg(target, pos);
426 unsigned long __user *u = ubuf;
428 if (__put_user(getreg(target, pos), u++))
438 static int genregs_set(struct task_struct *target,
439 const struct user_regset *regset,
440 unsigned int pos, unsigned int count,
441 const void *kbuf, const void __user *ubuf)
445 const unsigned long *k = kbuf;
446 while (count > 0 && !ret) {
447 ret = putreg(target, pos, *k++);
452 const unsigned long __user *u = ubuf;
453 while (count > 0 && !ret) {
455 ret = __get_user(word, u++);
458 ret = putreg(target, pos, word);
467 * This function is trivial and will be inlined by the compiler.
468 * Having it separates the implementation details of debug
469 * registers from the interface details of ptrace.
471 static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
474 case 0: return child->thread.debugreg0;
475 case 1: return child->thread.debugreg1;
476 case 2: return child->thread.debugreg2;
477 case 3: return child->thread.debugreg3;
478 case 6: return child->thread.debugreg6;
479 case 7: return child->thread.debugreg7;
484 static int ptrace_set_debugreg(struct task_struct *child,
485 int n, unsigned long data)
489 if (unlikely(n == 4 || n == 5))
492 if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
496 case 0: child->thread.debugreg0 = data; break;
497 case 1: child->thread.debugreg1 = data; break;
498 case 2: child->thread.debugreg2 = data; break;
499 case 3: child->thread.debugreg3 = data; break;
502 if ((data & ~0xffffffffUL) != 0)
504 child->thread.debugreg6 = data;
509 * Sanity-check data. Take one half-byte at once with
510 * check = (val >> (16 + 4*i)) & 0xf. It contains the
511 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
512 * 2 and 3 are LENi. Given a list of invalid values,
513 * we do mask |= 1 << invalid_value, so that
514 * (mask >> check) & 1 is a correct test for invalid values.
517 * R/Wi contains the type of the breakpoint /
518 * watchpoint, LENi contains the length of the watched
519 * data in the watchpoint case.
521 * The invalid values are:
522 * - LENi == 0x10 (undefined), so mask |= 0x0f00. [32-bit]
523 * - R/Wi == 0x10 (break on I/O reads or writes), so mask |= 0x4444.
525 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |= 0x1110.
528 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
530 * See the Intel Manual "System Programming Guide",
533 * Note that LENi == 0x10 is defined on x86_64 in long
534 * mode (i.e. even for 32-bit userspace software, but
535 * 64-bit kernel), so the x86_64 mask value is 0x5454.
536 * See the AMD manual no. 24593 (AMD64 System Programming)
539 #define DR7_MASK 0x5f54
541 #define DR7_MASK 0x5554
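/*
 * Editor's worked example (not in the original source), assuming the
 * 32-bit mask 0x5f54: a nibble of 0x5 (LENi = 01, R/Wi = 01, i.e. a
 * two-byte write watchpoint) gives (0x5f54 >> 0x5) & 1 == 0 and is
 * accepted, while a nibble of 0x4 (LENi = 01, R/Wi = 00, a non-zero
 * length on an execution breakpoint) gives (0x5f54 >> 0x4) & 1 == 1
 * and is rejected by the loop below.
 */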
543 data &= ~DR_CONTROL_RESERVED;
544 for (i = 0; i < 4; i++)
545 if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
547 child->thread.debugreg7 = data;
549 set_tsk_thread_flag(child, TIF_DEBUG);
551 clear_tsk_thread_flag(child, TIF_DEBUG);
559 * These access the current or another (stopped) task's io permission
560 * bitmap for debugging or core dump.
562 static int ioperm_active(struct task_struct *target,
563 const struct user_regset *regset)
565 return target->thread.io_bitmap_max / regset->size;
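/*
 * Editor's note (not in the original source): io_bitmap_max is kept in
 * bytes, so this reports how many regset-sized units are needed to
 * cover the populated part of the child's I/O permission bitmap.
 */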
568 static int ioperm_get(struct task_struct *target,
569 const struct user_regset *regset,
570 unsigned int pos, unsigned int count,
571 void *kbuf, void __user *ubuf)
573 if (!target->thread.io_bitmap_ptr)
576 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
577 target->thread.io_bitmap_ptr,
581 #ifdef CONFIG_X86_PTRACE_BTS
583 * A branch trace store context.
585 * Contexts may only be installed by ptrace_bts_config() and only for ptraced tasks.
588 * Contexts are destroyed when the tracee is detached from the tracer.
589 * The actual destruction work requires interrupts enabled, so the
590 * work is deferred and will be scheduled during __ptrace_unlink().
592 * Contexts hold an additional task_struct reference on the traced
593 * task, as well as a reference on the tracer's mm.
595 * Ptrace already holds a task_struct for the duration of ptrace operations,
596 * but since destruction is deferred, it may be executed after both
597 * tracer and tracee exited.
600 /* The branch trace handle. */
601 struct bts_tracer *tracer;
603 /* The buffer used to store the branch trace and its size. */
607 /* The mm that paid for the above buffer. */
608 struct mm_struct *mm;
610 /* The task this context belongs to. */
611 struct task_struct *task;
613 /* The signal to send on a bts buffer overflow. */
614 unsigned int bts_ovfl_signal;
616 /* The work struct to destroy a context. */
617 struct work_struct work;
620 static inline void alloc_bts_buffer(struct bts_context *context,
625 buffer = alloc_locked_buffer(size);
627 context->buffer = buffer;
628 context->size = size;
629 context->mm = get_task_mm(current);
633 static inline void free_bts_buffer(struct bts_context *context)
635 if (!context->buffer)
638 kfree(context->buffer);
639 context->buffer = NULL;
641 refund_locked_buffer_memory(context->mm, context->size);
648 static void free_bts_context_work(struct work_struct *w)
650 struct bts_context *context;
652 context = container_of(w, struct bts_context, work);
654 ds_release_bts(context->tracer);
655 put_task_struct(context->task);
656 free_bts_buffer(context);
660 static inline void free_bts_context(struct bts_context *context)
662 INIT_WORK(&context->work, free_bts_context_work);
663 schedule_work(&context->work);
666 static inline struct bts_context *alloc_bts_context(struct task_struct *task)
668 struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
670 context->task = task;
673 get_task_struct(task);
679 static int ptrace_bts_read_record(struct task_struct *child, size_t index,
680 struct bts_struct __user *out)
682 struct bts_context *context;
683 const struct bts_trace *trace;
684 struct bts_struct bts;
685 const unsigned char *at;
688 context = child->bts;
692 trace = ds_read_bts(context->tracer);
696 at = trace->ds.top - ((index + 1) * trace->ds.size);
697 if ((void *)at < trace->ds.begin)
698 at += (trace->ds.n * trace->ds.size);
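/*
 * Editor's note (not in the original source): records are indexed
 * newest-first; index 0 is the record just below the current write
 * position (ds.top), and the wrap-around above folds the pointer back
 * into the circular buffer of ds.n records when the subtraction runs
 * past ds.begin.
 */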
703 error = trace->read(context->tracer, at, &bts);
707 if (copy_to_user(out, &bts, sizeof(bts)))
713 static int ptrace_bts_drain(struct task_struct *child,
715 struct bts_struct __user *out)
717 struct bts_context *context;
718 const struct bts_trace *trace;
719 const unsigned char *at;
720 int error, drained = 0;
722 context = child->bts;
726 trace = ds_read_bts(context->tracer);
733 if (size < (trace->ds.top - trace->ds.begin))
736 for (at = trace->ds.begin; (void *)at < trace->ds.top;
737 out++, drained++, at += trace->ds.size) {
738 struct bts_struct bts;
740 error = trace->read(context->tracer, at, &bts);
744 if (copy_to_user(out, &bts, sizeof(bts)))
748 memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
750 error = ds_reset_bts(context->tracer);
757 static int ptrace_bts_config(struct task_struct *child,
759 const struct ptrace_bts_config __user *ucfg)
761 struct bts_context *context;
762 struct ptrace_bts_config cfg;
763 unsigned int flags = 0;
765 if (cfg_size < sizeof(cfg))
768 if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
771 context = child->bts;
773 context = alloc_bts_context(child);
777 if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
782 context->bts_ovfl_signal = cfg.signal;
785 ds_release_bts(context->tracer);
786 context->tracer = NULL;
788 if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
789 free_bts_buffer(context);
793 alloc_bts_buffer(context, cfg.size);
794 if (!context->buffer)
798 if (cfg.flags & PTRACE_BTS_O_TRACE)
801 if (cfg.flags & PTRACE_BTS_O_SCHED)
802 flags |= BTS_TIMESTAMPS;
805 ds_request_bts_task(child, context->buffer, context->size,
806 NULL, (size_t)-1, flags);
807 if (unlikely(IS_ERR(context->tracer))) {
808 int error = PTR_ERR(context->tracer);
810 free_bts_buffer(context);
811 context->tracer = NULL;
818 static int ptrace_bts_status(struct task_struct *child,
820 struct ptrace_bts_config __user *ucfg)
822 struct bts_context *context;
823 const struct bts_trace *trace;
824 struct ptrace_bts_config cfg;
826 context = child->bts;
830 if (cfg_size < sizeof(cfg))
833 trace = ds_read_bts(context->tracer);
837 memset(&cfg, 0, sizeof(cfg));
838 cfg.size = trace->ds.end - trace->ds.begin;
839 cfg.signal = context->bts_ovfl_signal;
840 cfg.bts_size = sizeof(struct bts_struct);
843 cfg.flags |= PTRACE_BTS_O_SIGNAL;
845 if (trace->ds.flags & BTS_USER)
846 cfg.flags |= PTRACE_BTS_O_TRACE;
848 if (trace->ds.flags & BTS_TIMESTAMPS)
849 cfg.flags |= PTRACE_BTS_O_SCHED;
851 if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
857 static int ptrace_bts_clear(struct task_struct *child)
859 struct bts_context *context;
860 const struct bts_trace *trace;
862 context = child->bts;
866 trace = ds_read_bts(context->tracer);
870 memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
872 return ds_reset_bts(context->tracer);
875 static int ptrace_bts_size(struct task_struct *child)
877 struct bts_context *context;
878 const struct bts_trace *trace;
880 context = child->bts;
884 trace = ds_read_bts(context->tracer);
888 return (trace->ds.top - trace->ds.begin) / trace->ds.size;
892 * Called from __ptrace_unlink() after the child has been moved back
893 * to its original parent.
895 void ptrace_bts_untrace(struct task_struct *child)
897 if (unlikely(child->bts)) {
898 free_bts_context(child->bts);
902 #endif /* CONFIG_X86_PTRACE_BTS */
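/*
 * Editor's sketch (not in the original source): a tracer might drive
 * the BTS interface above roughly as follows, with error handling
 * omitted and the request/argument order taken from arch_ptrace()
 * below:
 *
 *	struct ptrace_bts_config cfg = {
 *		.size	= 4096,
 *		.flags	= PTRACE_BTS_O_ALLOC | PTRACE_BTS_O_TRACE,
 *	};
 *	struct bts_struct rec;
 *	long i, nrec;
 *
 *	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, sizeof(cfg));
 *	... let the child run, then stop it ...
 *	nrec = ptrace(PTRACE_BTS_SIZE, pid, NULL, NULL);
 *	for (i = 0; i < nrec; i++)
 *		ptrace(PTRACE_BTS_GET, pid, &rec, i);
 */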
905 * Called by kernel/ptrace.c when detaching.
907 * Make sure the single step bit is not set.
909 void ptrace_disable(struct task_struct *child)
911 user_disable_single_step(child);
912 #ifdef TIF_SYSCALL_EMU
913 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
917 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
918 static const struct user_regset_view user_x86_32_view; /* Initialized below. */
921 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
924 unsigned long __user *datap = (unsigned long __user *)data;
927 /* read the word at location addr in the USER area. */
928 case PTRACE_PEEKUSR: {
932 if ((addr & (sizeof(data) - 1)) || addr < 0 ||
933 addr >= sizeof(struct user))
936 tmp = 0; /* Default return condition */
937 if (addr < sizeof(struct user_regs_struct))
938 tmp = getreg(child, addr);
939 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
940 addr <= offsetof(struct user, u_debugreg[7])) {
941 addr -= offsetof(struct user, u_debugreg[0]);
942 tmp = ptrace_get_debugreg(child, addr / sizeof(data));
944 ret = put_user(tmp, datap);
948 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
950 if ((addr & (sizeof(data) - 1)) || addr < 0 ||
951 addr >= sizeof(struct user))
954 if (addr < sizeof(struct user_regs_struct))
955 ret = putreg(child, addr, data);
956 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
957 addr <= offsetof(struct user, u_debugreg[7])) {
958 addr -= offsetof(struct user, u_debugreg[0]);
959 ret = ptrace_set_debugreg(child,
960 addr / sizeof(data), data);
964 case PTRACE_GETREGS: /* Get all gp regs from the child. */
965 return copy_regset_to_user(child,
966 task_user_regset_view(current),
968 0, sizeof(struct user_regs_struct),
971 case PTRACE_SETREGS: /* Set all gp regs in the child. */
972 return copy_regset_from_user(child,
973 task_user_regset_view(current),
975 0, sizeof(struct user_regs_struct),
978 case PTRACE_GETFPREGS: /* Get the child FPU state. */
979 return copy_regset_to_user(child,
980 task_user_regset_view(current),
982 0, sizeof(struct user_i387_struct),
985 case PTRACE_SETFPREGS: /* Set the child FPU state. */
986 return copy_regset_from_user(child,
987 task_user_regset_view(current),
989 0, sizeof(struct user_i387_struct),
993 case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
994 return copy_regset_to_user(child, &user_x86_32_view,
996 0, sizeof(struct user_fxsr_struct),
999 case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
1000 return copy_regset_from_user(child, &user_x86_32_view,
1002 0, sizeof(struct user_fxsr_struct),
1006 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1007 case PTRACE_GET_THREAD_AREA:
1010 ret = do_get_thread_area(child, addr,
1011 (struct user_desc __user *) data);
1014 case PTRACE_SET_THREAD_AREA:
1017 ret = do_set_thread_area(child, addr,
1018 (struct user_desc __user *) data, 0);
1022 #ifdef CONFIG_X86_64
1023 /* normal 64bit interface to access TLS data.
1024 Works just like arch_prctl, except that the arguments are reversed. */
1026 case PTRACE_ARCH_PRCTL:
1027 ret = do_arch_prctl(child, data, addr);
1032 * These bits need more cooking - not enabled yet:
1034 #ifdef CONFIG_X86_PTRACE_BTS
1035 case PTRACE_BTS_CONFIG:
1036 ret = ptrace_bts_config
1037 (child, data, (struct ptrace_bts_config __user *)addr);
1040 case PTRACE_BTS_STATUS:
1041 ret = ptrace_bts_status
1042 (child, data, (struct ptrace_bts_config __user *)addr);
1045 case PTRACE_BTS_SIZE:
1046 ret = ptrace_bts_size(child);
1049 case PTRACE_BTS_GET:
1050 ret = ptrace_bts_read_record
1051 (child, data, (struct bts_struct __user *) addr);
1054 case PTRACE_BTS_CLEAR:
1055 ret = ptrace_bts_clear(child);
1058 case PTRACE_BTS_DRAIN:
1059 ret = ptrace_bts_drain
1060 (child, data, (struct bts_struct __user *) addr);
1062 #endif /* CONFIG_X86_PTRACE_BTS */
1065 ret = ptrace_request(child, request, addr, data);
1072 #ifdef CONFIG_IA32_EMULATION
1074 #include <linux/compat.h>
1075 #include <linux/syscalls.h>
1076 #include <asm/ia32.h>
1077 #include <asm/user32.h>
1080 case offsetof(struct user32, regs.l): \
1081 regs->q = value; break
1084 case offsetof(struct user32, regs.rs): \
1085 return set_segment_reg(child, \
1086 offsetof(struct user_regs_struct, rs), \
1090 static int putreg32(struct task_struct *child, unsigned regno, u32 value)
1092 struct pt_regs *regs = task_pt_regs(child);
1113 case offsetof(struct user32, regs.orig_eax):
1115 * Sign-extend the value so that orig_eax = -1
1116 * causes (long)orig_ax < 0 tests to fire correctly.
1118 regs->orig_ax = (long) (s32) value;
1121 case offsetof(struct user32, regs.eflags):
1122 return set_flags(child, value);
1124 case offsetof(struct user32, u_debugreg[0]) ...
1125 offsetof(struct user32, u_debugreg[7]):
1126 regno -= offsetof(struct user32, u_debugreg[0]);
1127 return ptrace_set_debugreg(child, regno / 4, value);
1130 if (regno > sizeof(struct user32) || (regno & 3))
1134 * Other dummy fields in the virtual user structure
1146 case offsetof(struct user32, regs.l): \
1147 *val = regs->q; break
1150 case offsetof(struct user32, regs.rs): \
1151 *val = get_segment_reg(child, \
1152 offsetof(struct user_regs_struct, rs)); \
1155 static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
1157 struct pt_regs *regs = task_pt_regs(child);
1175 R32(orig_eax, orig_ax);
1179 case offsetof(struct user32, regs.eflags):
1180 *val = get_flags(child);
1183 case offsetof(struct user32, u_debugreg[0]) ...
1184 offsetof(struct user32, u_debugreg[7]):
1185 regno -= offsetof(struct user32, u_debugreg[0]);
1186 *val = ptrace_get_debugreg(child, regno / 4);
1190 if (regno > sizeof(struct user32) || (regno & 3))
1194 * Other dummy fields in the virtual user structure
1206 static int genregs32_get(struct task_struct *target,
1207 const struct user_regset *regset,
1208 unsigned int pos, unsigned int count,
1209 void *kbuf, void __user *ubuf)
1212 compat_ulong_t *k = kbuf;
1214 getreg32(target, pos, k++);
1215 count -= sizeof(*k);
1219 compat_ulong_t __user *u = ubuf;
1221 compat_ulong_t word;
1222 getreg32(target, pos, &word);
1223 if (__put_user(word, u++))
1225 count -= sizeof(*u);
1233 static int genregs32_set(struct task_struct *target,
1234 const struct user_regset *regset,
1235 unsigned int pos, unsigned int count,
1236 const void *kbuf, const void __user *ubuf)
1240 const compat_ulong_t *k = kbuf;
1241 while (count > 0 && !ret) {
1242 ret = putreg32(target, pos, *k++);
1243 count -= sizeof(*k);
1247 const compat_ulong_t __user *u = ubuf;
1248 while (count > 0 && !ret) {
1249 compat_ulong_t word;
1250 ret = __get_user(word, u++);
1253 ret = putreg32(target, pos, word);
1254 count -= sizeof(*u);
1261 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1262 compat_ulong_t caddr, compat_ulong_t cdata)
1264 unsigned long addr = caddr;
1265 unsigned long data = cdata;
1266 void __user *datap = compat_ptr(data);
1271 case PTRACE_PEEKUSR:
1272 ret = getreg32(child, addr, &val);
1274 ret = put_user(val, (__u32 __user *)datap);
1277 case PTRACE_POKEUSR:
1278 ret = putreg32(child, addr, data);
1281 case PTRACE_GETREGS: /* Get all gp regs from the child. */
1282 return copy_regset_to_user(child, &user_x86_32_view,
1284 0, sizeof(struct user_regs_struct32),
1287 case PTRACE_SETREGS: /* Set all gp regs in the child. */
1288 return copy_regset_from_user(child, &user_x86_32_view,
1290 sizeof(struct user_regs_struct32),
1293 case PTRACE_GETFPREGS: /* Get the child FPU state. */
1294 return copy_regset_to_user(child, &user_x86_32_view,
1296 sizeof(struct user_i387_ia32_struct),
1299 case PTRACE_SETFPREGS: /* Set the child FPU state. */
1300 return copy_regset_from_user(
1301 child, &user_x86_32_view, REGSET_FP,
1302 0, sizeof(struct user_i387_ia32_struct), datap);
1304 case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
1305 return copy_regset_to_user(child, &user_x86_32_view,
1307 sizeof(struct user32_fxsr_struct),
1310 case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
1311 return copy_regset_from_user(child, &user_x86_32_view,
1313 sizeof(struct user32_fxsr_struct),
1316 case PTRACE_GET_THREAD_AREA:
1317 case PTRACE_SET_THREAD_AREA:
1318 #ifdef CONFIG_X86_PTRACE_BTS
1319 case PTRACE_BTS_CONFIG:
1320 case PTRACE_BTS_STATUS:
1321 case PTRACE_BTS_SIZE:
1322 case PTRACE_BTS_GET:
1323 case PTRACE_BTS_CLEAR:
1324 case PTRACE_BTS_DRAIN:
1325 #endif /* CONFIG_X86_PTRACE_BTS */
1326 return arch_ptrace(child, request, addr, data);
1329 return compat_ptrace_request(child, request, addr, data);
1335 #endif /* CONFIG_IA32_EMULATION */
1337 #ifdef CONFIG_X86_64
1339 static const struct user_regset x86_64_regsets[] = {
1340 [REGSET_GENERAL] = {
1341 .core_note_type = NT_PRSTATUS,
1342 .n = sizeof(struct user_regs_struct) / sizeof(long),
1343 .size = sizeof(long), .align = sizeof(long),
1344 .get = genregs_get, .set = genregs_set
1347 .core_note_type = NT_PRFPREG,
1348 .n = sizeof(struct user_i387_struct) / sizeof(long),
1349 .size = sizeof(long), .align = sizeof(long),
1350 .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
1352 [REGSET_IOPERM64] = {
1353 .core_note_type = NT_386_IOPERM,
1354 .n = IO_BITMAP_LONGS,
1355 .size = sizeof(long), .align = sizeof(long),
1356 .active = ioperm_active, .get = ioperm_get
1360 static const struct user_regset_view user_x86_64_view = {
1361 .name = "x86_64", .e_machine = EM_X86_64,
1362 .regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
1365 #else /* CONFIG_X86_32 */
1367 #define user_regs_struct32 user_regs_struct
1368 #define genregs32_get genregs_get
1369 #define genregs32_set genregs_set
1371 #define user_i387_ia32_struct user_i387_struct
1372 #define user32_fxsr_struct user_fxsr_struct
1374 #endif /* CONFIG_X86_64 */
1376 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1377 static const struct user_regset x86_32_regsets[] = {
1378 [REGSET_GENERAL] = {
1379 .core_note_type = NT_PRSTATUS,
1380 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
1381 .size = sizeof(u32), .align = sizeof(u32),
1382 .get = genregs32_get, .set = genregs32_set
1385 .core_note_type = NT_PRFPREG,
1386 .n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
1387 .size = sizeof(u32), .align = sizeof(u32),
1388 .active = fpregs_active, .get = fpregs_get, .set = fpregs_set
1391 .core_note_type = NT_PRXFPREG,
1392 .n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
1393 .size = sizeof(u32), .align = sizeof(u32),
1394 .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
1397 .core_note_type = NT_386_TLS,
1398 .n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
1399 .size = sizeof(struct user_desc),
1400 .align = sizeof(struct user_desc),
1401 .active = regset_tls_active,
1402 .get = regset_tls_get, .set = regset_tls_set
1404 [REGSET_IOPERM32] = {
1405 .core_note_type = NT_386_IOPERM,
1406 .n = IO_BITMAP_BYTES / sizeof(u32),
1407 .size = sizeof(u32), .align = sizeof(u32),
1408 .active = ioperm_active, .get = ioperm_get
1412 static const struct user_regset_view user_x86_32_view = {
1413 .name = "i386", .e_machine = EM_386,
1414 .regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
1418 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1420 #ifdef CONFIG_IA32_EMULATION
1421 if (test_tsk_thread_flag(task, TIF_IA32))
1423 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1424 return &user_x86_32_view;
1426 #ifdef CONFIG_X86_64
1427 return &user_x86_64_view;
1431 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1432 int error_code, int si_code)
1434 struct siginfo info;
1436 tsk->thread.trap_no = 1;
1437 tsk->thread.error_code = error_code;
1439 memset(&info, 0, sizeof(info));
1440 info.si_signo = SIGTRAP;
1441 info.si_code = si_code;
1444 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
1446 /* Send us the fake SIGTRAP */
1447 force_sig_info(SIGTRAP, &info, tsk);
1451 #ifdef CONFIG_X86_32
1453 #elif defined CONFIG_IA32_EMULATION
1454 # define IS_IA32 is_compat_task()
1460 * We must return the syscall number to actually look up in the table.
1461 * This can be -1L to skip running any syscall at all.
1463 asmregparm long syscall_trace_enter(struct pt_regs *regs)
1468 * If we stepped into a sysenter/syscall insn, it trapped in
1469 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
1470 * If user-mode had set TF itself, then it's still clear from
1471 * do_debug() and we need to set it again to restore the user
1472 * state. If we entered on the slow path, TF was already set.
1474 if (test_thread_flag(TIF_SINGLESTEP))
1475 regs->flags |= X86_EFLAGS_TF;
1477 /* do the secure computing check first */
1478 secure_computing(regs->orig_ax);
1480 if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
1483 if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
1484 tracehook_report_syscall_entry(regs))
1487 if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
1488 ftrace_syscall_enter(regs);
1490 if (unlikely(current->audit_context)) {
1492 audit_syscall_entry(AUDIT_ARCH_I386,
1495 regs->dx, regs->si);
1496 #ifdef CONFIG_X86_64
1498 audit_syscall_entry(AUDIT_ARCH_X86_64,
1501 regs->dx, regs->r10);
1505 return ret ?: regs->orig_ax;
1508 asmregparm void syscall_trace_leave(struct pt_regs *regs)
1510 if (unlikely(current->audit_context))
1511 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1513 if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
1514 ftrace_syscall_exit(regs);
1516 if (test_thread_flag(TIF_SYSCALL_TRACE))
1517 tracehook_report_syscall_exit(regs, 0);
1520 * If TIF_SYSCALL_EMU is set, we only get here because of
1521 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
1522 * We already reported this syscall instruction in
1523 * syscall_trace_enter(), so don't do any more now.
1525 if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
1529 * If we are single-stepping, synthesize a trap to follow the
1530 * system call instruction.
1532 if (test_thread_flag(TIF_SINGLESTEP) &&
1533 tracehook_consider_fatal_signal(current, SIGTRAP))
1534 send_sigtrap(current, regs, 0, TRAP_BRKPT);