/*
 * Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
 * [linux-2.6] / include / asm-i386 / irqflags.h
 */
1 /*
2  * include/asm-i386/irqflags.h
3  *
4  * IRQ flags handling
5  *
6  * This file gets included from lowlevel asm headers too, to provide
7  * wrapped versions of the local_irq_*() APIs, based on the
8  * raw_local_irq_*() functions from the lowlevel headers.
9  */
10 #ifndef _ASM_IRQFLAGS_H
11 #define _ASM_IRQFLAGS_H
12
13 #ifdef CONFIG_PARAVIRT
14 #include <asm/paravirt.h>
15 #else
16 #ifndef __ASSEMBLY__
17
/*
 * Return a snapshot of the current EFLAGS register, obtained by
 * pushing EFLAGS and popping it into @flags.
 *
 * The output constraint must be "=r", not "=g": "=g" also permits a
 * memory operand, which the compiler may address relative to %esp.
 * Since the pushfl instruction itself modifies %esp, an %esp-relative
 * popl destination can resolve to the wrong location.  Forcing a
 * register operand avoids the problem entirely.
 */
static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long flags;

        __asm__ __volatile__(
                "pushfl ; popl %0"
                : "=r" (flags)
                : /* no input */
        );

        return flags;
}
30
/*
 * Restore a previously saved EFLAGS image: push @flags and pop it
 * into EFLAGS.  Interrupts end up enabled iff IF was set in @flags.
 * The "memory" and "cc" clobbers stop the compiler from moving
 * memory accesses across the interrupt-state change.
 */
static inline void raw_local_irq_restore(unsigned long flags)
{
        __asm__ __volatile__(
                "pushl %0 ; popfl"
                : /* no output */
                :"g" (flags)
                :"memory", "cc"
        );
}
40
/*
 * Disable interrupts on the local CPU (clear IF via "cli").
 * The "memory" clobber acts as a compiler barrier so accesses are
 * not reordered into the interrupts-enabled region.
 */
static inline void raw_local_irq_disable(void)
{
        __asm__ __volatile__("cli" : : : "memory");
}
45
/*
 * Enable interrupts on the local CPU (set IF via "sti").
 * The "memory" clobber acts as a compiler barrier.
 */
static inline void raw_local_irq_enable(void)
{
        __asm__ __volatile__("sti" : : : "memory");
}
50
51 /*
52  * Used in the idle loop; sti takes one instruction cycle
53  * to complete:
54  */
/*
 * Enable interrupts and halt in one go.  "sti" keeps interrupts
 * masked for exactly one more instruction (the interrupt shadow),
 * so the "hlt" is entered atomically with respect to interrupt
 * delivery — no wakeup can be lost between the two instructions.
 */
static inline void raw_safe_halt(void)
{
        __asm__ __volatile__("sti; hlt" : : : "memory");
}
59
60 /*
61  * Used when interrupts are already enabled or to
62  * shutdown the processor:
63  */
/*
 * Halt the CPU without touching the interrupt flag.  Only safe when
 * interrupts are already enabled (otherwise the CPU never wakes up,
 * which is exactly what a shutdown path wants).
 */
static inline void halt(void)
{
        __asm__ __volatile__("hlt": : :"memory");
}
68
69 /*
70  * For spinlocks, etc:
71  */
/*
 * Snapshot EFLAGS, then disable interrupts.  The returned image is
 * later handed to raw_local_irq_restore() — the classic save/restore
 * pair used by spin_lock_irqsave() and friends.
 */
static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long saved_flags = __raw_local_save_flags();

        raw_local_irq_disable();
        return saved_flags;
}
80
81 #else
/*
 * __ASSEMBLY__ && !CONFIG_PARAVIRT: the paravirt operation macros
 * collapse to the bare native instructions.  The 'clobbers' argument
 * is accepted (and ignored) so asm callers match the paravirt
 * variants' signature.
 */
#define DISABLE_INTERRUPTS(clobbers)    cli
#define ENABLE_INTERRUPTS(clobbers)     sti
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define INTERRUPT_RETURN                iret
#define GET_CR0_INTO_EAX                movl %cr0, %eax
87 #endif /* __ASSEMBLY__ */
88 #endif /* CONFIG_PARAVIRT */
89
90 #ifndef __ASSEMBLY__
/*
 * Statement-style wrappers around the value-returning helpers.
 * (flags) must be an lvalue of type unsigned long; the do/while(0)
 * makes each macro behave as a single statement.
 */
#define raw_local_save_flags(flags) \
                do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags) \
                do { (flags) = __raw_local_irq_save(); } while (0)
96
/*
 * Return nonzero when the given EFLAGS image has the interrupt-enable
 * flag clear, i.e. interrupts were disabled when it was taken.
 */
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
        /* bit 9 of EFLAGS is IF (X86_EFLAGS_IF) */
        return (flags & (1 << 9)) == 0;
}
101
/*
 * Report whether interrupts are currently disabled on this CPU,
 * by sampling EFLAGS and testing its IF bit.
 */
static inline int raw_irqs_disabled(void)
{
        return raw_irqs_disabled_flags(__raw_local_save_flags());
}
108 #endif /* __ASSEMBLY__ */
109
110 /*
111  * Do the CPU's IRQ-state tracing from assembly code. We call a
112  * C function, so save all the C-clobbered registers:
113  */
#ifdef CONFIG_TRACE_IRQFLAGS

/*
 * Call trace_hardirqs_on() from assembly.  %eax/%ecx/%edx are the
 * i386 C-ABI caller-clobbered registers, so they are saved and
 * restored around the call; everything else is callee-saved.
 */
# define TRACE_IRQS_ON                          \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call trace_hardirqs_on;                 \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

/*
 * Same as TRACE_IRQS_ON, but for trace_hardirqs_off().
 */
# define TRACE_IRQS_OFF                         \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call trace_hardirqs_off;                \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#else
/* Tracing disabled: the markers compile away to nothing. */
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
138
139 #endif