include/asm-mips/irqflags.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/hazards.h>

__asm__ (
        "       .macro  raw_local_irq_enable                            \n"
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
#ifdef CONFIG_MIPS_MT_SMTC
        "       mfc0    $1, $2, 1       # SMTC - clear TCStatus.IXMT    \n"
        "       ori     $1, 0x400                                       \n"
        "       xori    $1, 0x400                                       \n"
        "       mtc0    $1, $2, 1                                       \n"
#elif defined(CONFIG_CPU_MIPSR2)
        "       ei                                                      \n"
#else
        "       mfc0    $1,$12                                          \n"
        "       ori     $1,0x1f                                         \n"
        "       xori    $1,0x1e                                         \n"
        "       mtc0    $1,$12                                          \n"
#endif
        "       irq_enable_hazard                                       \n"
        "       .set    pop                                             \n"
        "       .endm");

static inline void raw_local_irq_enable(void)
{
        __asm__ __volatile__(
                "raw_local_irq_enable"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * The R4000/R4400 need three nops, the R4600 needs two and the R10000
 * needs none at all.
 */
/*
 * For the TX49, operating on the IE bit alone is not enough.
 *
 * If an mfc0 from $12 follows a store, the mfc0 is the last instruction
 * of a page and fetching the next instruction causes a TLB miss, the
 * result of the mfc0 may wrongly have the EXL bit set.
 *
 * Errata: ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
 *
 * Workaround: mask the EXL bit of the result or place a nop before the mfc0.
 */
__asm__ (
        "       .macro  raw_local_irq_disable                           \n"
        "       .set    push                                            \n"
        "       .set    noat                                            \n"
#ifdef CONFIG_MIPS_MT_SMTC
        "       mfc0    $1, $2, 1                                       \n"
        "       ori     $1, 0x400                                       \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $2, 1                                       \n"
#elif defined(CONFIG_CPU_MIPSR2)
        "       di                                                      \n"
#else
        "       mfc0    $1,$12                                          \n"
        "       ori     $1,0x1f                                         \n"
        "       xori    $1,0x1f                                         \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1,$12                                          \n"
#endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");

static inline void raw_local_irq_disable(void)
{
        __asm__ __volatile__(
                "raw_local_irq_disable"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}
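
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper showing how the unconditional primitives above might bracket a
 * short critical section.  Note that raw_local_irq_enable() re-enables
 * interrupts regardless of the state on entry, so this pattern is only
 * safe when the caller is known to run with interrupts on; code that may
 * nest should use the raw_local_irq_save()/raw_local_irq_restore() pair
 * defined further down instead.
 */
static inline void example_update_counter(volatile unsigned int *counter)
{
        raw_local_irq_disable();        /* mask interrupts on this CPU */
        *counter = *counter + 1;        /* update must not be interrupted */
        raw_local_irq_enable();         /* unconditionally re-enable */
}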

__asm__ (
        "       .macro  raw_local_save_flags flags                      \n"
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
#ifdef CONFIG_MIPS_MT_SMTC
        "       mfc0    \\flags, $2, 1                                  \n"
#else
        "       mfc0    \\flags, $12                                    \n"
#endif
        "       .set    pop                                             \n"
        "       .endm                                                   \n");

#define raw_local_save_flags(x)                                         \
__asm__ __volatile__(                                                   \
        "raw_local_save_flags %0"                                       \
        : "=r" (x))
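
/*
 * Illustrative sketch, not part of the original header: the hypothetical
 * helper below only demonstrates the calling convention of the
 * raw_local_save_flags() macro above - it samples the live CP0 Status
 * register (TCStatus under SMTC) into an ordinary unsigned long without
 * modifying the interrupt state.
 */
static inline unsigned long example_read_irq_flags(void)
{
        unsigned long flags;

        raw_local_save_flags(flags);    /* non-destructive snapshot */
        return flags;
}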

__asm__ (
        "       .macro  raw_local_irq_save result                       \n"
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
#ifdef CONFIG_MIPS_MT_SMTC
        "       mfc0    \\result, $2, 1                                 \n"
        "       ori     $1, \\result, 0x400                             \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $2, 1                                       \n"
        "       andi    \\result, \\result, 0x400                       \n"
#elif defined(CONFIG_CPU_MIPSR2)
        "       di      \\result                                        \n"
        "       andi    \\result, 1                                     \n"
#else
        "       mfc0    \\result, $12                                   \n"
        "       ori     $1, \\result, 0x1f                              \n"
        "       xori    $1, 0x1f                                        \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $12                                         \n"
#endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");

#define raw_local_irq_save(x)                                           \
__asm__ __volatile__(                                                   \
        "raw_local_irq_save\t%0"                                        \
        : "=r" (x)                                                      \
        : /* no inputs */                                               \
        : "memory")

__asm__ (
        "       .macro  raw_local_irq_restore flags                     \n"
        "       .set    push                                            \n"
        "       .set    noreorder                                       \n"
        "       .set    noat                                            \n"
#ifdef CONFIG_MIPS_MT_SMTC
        "       mfc0    $1, $2, 1                                       \n"
        "       andi    \\flags, 0x400                                  \n"
        "       ori     $1, 0x400                                       \n"
        "       xori    $1, 0x400                                       \n"
        "       or      \\flags, $1                                     \n"
        "       mtc0    \\flags, $2, 1                                  \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
        /*
         * Slow, but doesn't suffer from the relatively unlikely race
         * condition we have had since day 1.
         */
        "       beqz    \\flags, 1f                                     \n"
        "        di                                                     \n"
        "       ei                                                      \n"
        "1:                                                             \n"
#elif defined(CONFIG_CPU_MIPSR2)
        /*
         * Fast, dangerous.  Life is fun, life is good.
         */
        "       mfc0    $1, $12                                         \n"
        "       ins     $1, \\flags, 0, 1                               \n"
        "       mtc0    $1, $12                                         \n"
#else
        "       mfc0    $1, $12                                         \n"
        "       andi    \\flags, 1                                      \n"
        "       ori     $1, 0x1f                                        \n"
        "       xori    $1, 0x1f                                        \n"
        "       or      \\flags, $1                                     \n"
        "       mtc0    \\flags, $12                                    \n"
#endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");

extern void smtc_ipi_replay(void);

static inline void raw_local_irq_restore(unsigned long flags)
{
        unsigned long __tmp1;

#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
        /*
         * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY enables prompt replay of
         * deferred IPIs, at the cost of branch and call overhead on each
         * local_irq_restore().
         */
        if (unlikely(!(flags & 0x0400)))
                smtc_ipi_replay();
#endif

        __asm__ __volatile__(
                "raw_local_irq_restore\t%0"
                : "=r" (__tmp1)
                : "0" (flags)
                : "memory");
}
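
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * caller using the save/restore pair.  Unlike raw_local_irq_disable()/
 * raw_local_irq_enable(), this nests safely because the flags word
 * captures the interrupt state on entry (Status.IE, or TCStatus.IXMT
 * under SMTC) and raw_local_irq_restore() puts exactly that state back.
 */
static inline void example_nested_section(unsigned int *val)
{
        unsigned long flags;

        raw_local_irq_save(flags);      /* disable and remember old state */
        (*val)++;                       /* may run with IRQs already off */
        raw_local_irq_restore(flags);   /* restore whatever was in effect */
}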

static inline int raw_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * The SMTC model uses TCStatus.IXMT to disable interrupts for a
         * thread/CPU.
         */
        return flags & 0x400;
#else
        return !(flags & 1);
#endif
}
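
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper combining raw_local_save_flags() and raw_irqs_disabled_flags()
 * to ask whether interrupts are currently masked on this CPU.  Note that
 * raw_irqs_disabled_flags() only interprets a previously captured flags
 * word; it never reads the hardware state itself.
 */
static inline int example_irqs_currently_disabled(void)
{
        unsigned long flags;

        raw_local_save_flags(flags);
        return raw_irqs_disabled_flags(flags);
}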

#endif /* __ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
# define TRACE_IRQS_RELOAD_REGS                                         \
        LONG_L  $11, PT_R11(sp);                                        \
        LONG_L  $10, PT_R10(sp);                                        \
        LONG_L  $9, PT_R9(sp);                                          \
        LONG_L  $8, PT_R8(sp);                                          \
        LONG_L  $7, PT_R7(sp);                                          \
        LONG_L  $6, PT_R6(sp);                                          \
        LONG_L  $5, PT_R5(sp);                                          \
        LONG_L  $4, PT_R4(sp);                                          \
        LONG_L  $2, PT_R2(sp)
#else
# define TRACE_IRQS_RELOAD_REGS                                         \
        LONG_L  $7, PT_R7(sp);                                          \
        LONG_L  $6, PT_R6(sp);                                          \
        LONG_L  $5, PT_R5(sp);                                          \
        LONG_L  $4, PT_R4(sp);                                          \
        LONG_L  $2, PT_R2(sp)
#endif
# define TRACE_IRQS_ON                                                  \
        CLI;    /* make sure trace_hardirqs_on() is called at kernel level */ \
        jal     trace_hardirqs_on
# define TRACE_IRQS_ON_RELOAD                                           \
        TRACE_IRQS_ON;                                                  \
        TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF                                                 \
        jal     trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif
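
/*
 * Illustrative usage sketch, not part of the original header: the
 * TRACE_IRQS_* macros are meant to be expanded from assembly
 * entry/exception code and only emit instructions when
 * CONFIG_TRACE_IRQFLAGS is set.  Hypothetical surrounding code:
 *
 *      SAVE_ALL                # save the trap frame
 *      TRACE_IRQS_OFF          # record that IRQs are now off
 *      ...
 *      TRACE_IRQS_ON_RELOAD    # record that IRQs go back on, then reload
 *                              # the argument/result registers that the C
 *                              # call to trace_hardirqs_on() may clobber
 *      eret
 */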

#endif /* _ASM_IRQFLAGS_H */