/*
 * include/asm-x86_64/i387.h
 *
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef __ASM_X86_64_I387_H
#define __ASM_X86_64_I387_H

#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>

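/*
 * Setup/save helpers implemented in the arch i387 code
 * (arch/x86_64/kernel/i387.c); this header only declares them.
 */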
extern void fpu_init(void);
extern unsigned int mxcsr_feature_mask;
extern void mxcsr_feature_mask_init(void);
extern void init_fpu(struct task_struct *child);
extern int save_i387(struct _fpstate __user *buf);

/*
 * FPU lazy state save handling...
 */

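/*
 * unlazy_fpu() writes the task's live FPU/SSE registers back into
 * tsk->thread.i387 if the task has actually touched the FPU
 * (TS_USEDFPU set), e.g. on the context switch path or before the
 * saved image is inspected.
 */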
#define unlazy_fpu(tsk) do { \
        if ((tsk)->thread_info->status & TS_USEDFPU) \
                save_init_fpu(tsk); \
} while (0)

/* Ignore delayed exceptions from user space */
static inline void tolerant_fwait(void)
{
        asm volatile("1: fwait\n"
                     "2:\n"
                     "   .section __ex_table,\"a\"\n"
                     "  .align 8\n"
                     "  .quad 1b,2b\n"
                     "  .previous\n");
}

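/*
 * clear_fpu() discards the task's live FPU state: drain any pending
 * exception with tolerant_fwait(), drop TS_USEDFPU and set CR0.TS so
 * the next FPU instruction faults and reloads state.
 */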
#define clear_fpu(tsk) do { \
        if ((tsk)->thread_info->status & TS_USEDFPU) {          \
                tolerant_fwait();                               \
                (tsk)->thread_info->status &= ~TS_USEDFPU;      \
                stts();                                         \
        }                                                       \
} while (0)

/*
 * ptrace request handlers (PTRACE_GETFPREGS/PTRACE_SETFPREGS)...
 */
extern int get_fpregs(struct user_i387_struct __user *buf,
                      struct task_struct *tsk);
extern int set_fpregs(struct task_struct *tsk,
                      struct user_i387_struct __user *buf);

/*
 * i387 state interaction: accessors for the fxsave image kept in
 * task->thread.i387.
 */
#define get_fpu_mxcsr(t) ((t)->thread.i387.fxsave.mxcsr)
#define get_fpu_cwd(t) ((t)->thread.i387.fxsave.cwd)
#define get_fpu_fxsr_twd(t) ((t)->thread.i387.fxsave.twd)
#define get_fpu_swd(t) ((t)->thread.i387.fxsave.swd)
#define set_fpu_cwd(t,val) ((t)->thread.i387.fxsave.cwd = (val))
#define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val))
#define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val))

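/*
 * Restore FPU state from an fxsave image. If fxrstor faults (e.g. a
 * bad user pointer or malformed image), the exception table fixup
 * sets err to -1 and the FPU state is reinitialized.
 */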
static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
{
        int err;
        asm volatile("1:  rex64 ; fxrstor (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:  movl $-1,%[err]\n"
                     "    jmp  2b\n"
                     ".previous\n"
                     ".section __ex_table,\"a\"\n"
                     "   .align 8\n"
                     "   .quad  1b,3b\n"
                     ".previous"
                     : [err] "=r" (err)
                     : [fx] "r" (fx), "0" (0));
        if (unlikely(err))
                init_fpu(current);
        return err;
}

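/*
 * Save the current FPU state into a user-space fxsave buffer. If
 * fxsave faults, err is set to -1 and the buffer is zeroed so it is
 * not left partially written.
 */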
static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
{
        int err;
        asm volatile("1:  rex64 ; fxsave (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:  movl $-1,%[err]\n"
                     "    jmp  2b\n"
                     ".previous\n"
                     ".section __ex_table,\"a\"\n"
                     "   .align 8\n"
                     "   .quad  1b,3b\n"
                     ".previous"
                     : [err] "=r" (err)
                     : [fx] "r" (fx), "0" (0));
        if (unlikely(err))
                __clear_user(fx, sizeof(struct i387_fxsave_struct));
        return err;
}

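/*
 * kernel_fpu_begin()/kernel_fpu_end() bracket FPU/SSE use in kernel
 * mode: begin disables preemption, saves the current task's live FPU
 * state if necessary and clears CR0.TS; end sets CR0.TS again and
 * re-enables preemption.
 */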
static inline void kernel_fpu_begin(void)
{
        struct thread_info *me = current_thread_info();
        preempt_disable();
        if (me->status & TS_USEDFPU) {
                asm volatile("rex64 ; fxsave %0 ; fnclex"
                              : "=m" (me->task->thread.i387.fxsave));
                me->status &= ~TS_USEDFPU;
                return;
        }
        clts();
}

static inline void kernel_fpu_end(void)
{
        stts();
        preempt_enable();
}
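
/*
 * Illustrative usage sketch (not taken from any particular caller):
 *
 *        kernel_fpu_begin();
 *        ... SSE-accelerated copy/checksum using the xmm registers ...
 *        kernel_fpu_end();
 */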
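/*
 * save_init_fpu() unconditionally saves the FPU state and clears
 * pending exceptions (fnclex), then drops TS_USEDFPU and sets CR0.TS;
 * callers such as unlazy_fpu() check TS_USEDFPU first.
 */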
static inline void save_init_fpu(struct task_struct *tsk)
{
        asm volatile("rex64 ; fxsave %0 ; fnclex"
                      : "=m" (tsk->thread.i387.fxsave));
        tsk->thread_info->status &= ~TS_USEDFPU;
        stts();
}

/*
 * This restores directly out of user space (the signal return path
 * hands it the user's _fpstate buffer). Exceptions are handled.
 */
static inline int restore_i387(struct _fpstate __user *buf)
{
        return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
}

#endif /* __ASM_X86_64_I387_H */