linux-2.6: include/asm-x86/irqflags.h
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */

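/*
 * Read the current EFLAGS with pushf/pop; the "memory" clobber keeps the
 * compiler from reordering memory accesses around the flag read.
 */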
static inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=g" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}

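/*
 * Write @flags back to EFLAGS with push/popf; popf also rewrites the
 * arithmetic flags, hence the "cc" clobber.
 */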
static inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     :"g" (flags)
                     :"memory", "cc");
}

static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

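/*
 * sti only takes effect after the following instruction, so "sti; hlt"
 * guarantees that hlt is reached before any interrupt is delivered: a
 * wakeup interrupt cannot slip in between and leave the CPU halted.
 */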
static inline void native_safe_halt(void)
{
        asm volatile("sti; hlt": : :"memory");
}

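/*
 * Plain hlt; the interrupt flag is left untouched, so with interrupts
 * disabled the CPU stays halted until an NMI, SMI or reset.
 */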
static inline void native_halt(void)
{
        asm volatile("hlt": : :"memory");
}

#endif /* __ASSEMBLY__ */

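/*
 * With CONFIG_PARAVIRT the raw_* operations come from asm/paravirt.h, where
 * they are routed through pv_irq_ops so a hypervisor can replace them.
 * Otherwise they map straight onto the native_* helpers above.
 */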
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__

static inline unsigned long __raw_local_save_flags(void)
{
        return native_save_fl();
}

static inline void raw_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline void raw_local_irq_disable(void)
{
        native_irq_disable();
}

static inline void raw_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; sti only takes effect after the next
 * instruction, so it cannot race with the hlt that follows:
 */
static inline void raw_safe_halt(void)
{
        native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long flags = __raw_local_save_flags();

        raw_local_irq_disable();

        return flags;
}
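
/*
 * Typical pairing (a sketch, not taken from this file):
 *
 *      unsigned long flags = __raw_local_irq_save();
 *      ... critical section with interrupts off ...
 *      raw_local_irq_restore(flags);
 */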
#else

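/*
 * Assembly-side versions.  The argument to ENABLE/DISABLE_INTERRUPTS is a
 * register-clobber hint used by the paravirt variants (the CLBR_* values in
 * paravirt.h); the native definitions ignore it.
 */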
#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli

#ifdef CONFIG_X86_64
#define SWAPGS  swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around those cases or just
 * fault and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK     swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME /*  */

#define INTERRUPT_RETURN        iretq
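/*
 * Return-to-user-space sequences: swapgs restores the user GS base before
 * sysret/sysexit drops back to CPL 3.
 */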
#define USERGS_SYSRET64                         \
        swapgs;                                 \
        sysretq;
#define USERGS_SYSRET32                         \
        swapgs;                                 \
        sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32             \
        swapgs;                                 \
        sti;                                    \
        sysexit

#else
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif


#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#define raw_local_save_flags(flags)                             \
        do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags)                               \
        do { (flags) = __raw_local_irq_save(); } while (0)

static inline int raw_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static inline int raw_irqs_disabled(void)
{
        unsigned long flags = __raw_local_save_flags();

        return raw_irqs_disabled_flags(flags);
}

/*
 * Makes the traced hardirq state match the machine state.
 *
 * Should be a rarely used function, only in places where it's
 * otherwise impossible to know the irq state, like in traps.
 */
static inline void trace_hardirqs_fixup_flags(unsigned long flags)
{
        if (raw_irqs_disabled_flags(flags))
                trace_hardirqs_off();
        else
                trace_hardirqs_on();
}

static inline void trace_hardirqs_fixup(void)
{
        unsigned long flags = __raw_local_save_flags();

        trace_hardirqs_fixup_flags(flags);
}

#else

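/*
 * Assembly hooks for lockdep: call lockdep_sys_exit on the way back to
 * user space, preserving the caller-clobbered registers around the call.
 */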
#ifdef CONFIG_X86_64
#define ARCH_LOCKDEP_SYS_EXIT           call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ       \
        TRACE_IRQS_ON; \
        sti; \
        SAVE_REST; \
        LOCKDEP_SYS_EXIT; \
        RESTORE_REST; \
        cli; \
        TRACE_IRQS_OFF;

#else
#define ARCH_LOCKDEP_SYS_EXIT                   \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

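/*
 * TRACE_IRQS_ON/OFF notify the irq-state tracer (lockdep) from assembly;
 * the thunks save and restore registers around the call.
 */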
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON         call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF        call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  define LOCKDEP_SYS_EXIT      ARCH_LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ  ARCH_LOCKDEP_SYS_EXIT_IRQ
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif

#endif /* __ASSEMBLY__ */
#endif /* _X86_IRQFLAGS_H_ */