linux-2.6: include/asm-mips/hazards.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 2004 Ralf Baechle
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/config.h>

#ifdef __ASSEMBLY__

        .macro  _ssnop
        sll     $0, $0, 1
        .endm

        .macro  _ehb
        sll     $0, $0, 3
        .endm
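
/*
 * Note: "sll $0, $0, 1" is the architectural encoding of SSNOP and
 * "sll $0, $0, 3" is the encoding of EHB; spelling them out as plain
 * shifts presumably keeps the file assembling on toolchains that do
 * not know those mnemonics.
 */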

/*
 * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */
#ifdef CONFIG_CPU_RM9000

        .macro  mtc0_tlbw_hazard
        .set    push
        .set    mips32
        _ssnop; _ssnop; _ssnop; _ssnop
        .set    pop
        .endm

        .macro  tlbw_eret_hazard
        .set    push
        .set    mips32
        _ssnop; _ssnop; _ssnop; _ssnop
        .set    pop
        .endm

#else

/*
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single cycle
 * hazard so this is a nice trick to get optimal code for a range of
 * processors.
 */
        .macro  mtc0_tlbw_hazard
        b       . + 8
        .endm

        .macro  tlbw_eret_hazard
        .endm
#endif
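
/*
 * Illustrative use only (not part of this header): a TLB refill sequence
 * places the barriers between the CP0 setup, the TLB write and the eret,
 * along the lines of
 *
 *      mtc0    k0, CP0_ENTRYLO0
 *      mtc0    k1, CP0_ENTRYLO1
 *      mtc0_tlbw_hazard
 *      tlbwr
 *      tlbw_eret_hazard
 *      eret
 */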

/*
 * mtc0->mfc0 hazard
 * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
 * It is a MIPS32R2 processor so ehb will clear the hazard.
 */

#ifdef CONFIG_CPU_MIPSR2
/*
 * Use a macro for ehb unless explicit support for MIPSR2 is enabled
 */

#define irq_enable_hazard                                               \
        _ehb

#define irq_disable_hazard                                              \
        _ehb

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
 */

#define irq_enable_hazard

#define irq_disable_hazard

#else

/*
 * Classic MIPS needs 1 - 3 nops or ssnops
 */
#define irq_enable_hazard
#define irq_disable_hazard                                              \
        _ssnop; _ssnop; _ssnop

#endif
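
/*
 * Illustrative use only (not part of this header): an assembly caller that
 * masks interrupts via c0_status places the barrier before it relies on
 * them being off, roughly
 *
 *      mfc0    t0, CP0_STATUS
 *      ori     t0, 1
 *      xori    t0, 1
 *      mtc0    t0, CP0_STATUS
 *      irq_disable_hazard
 */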

#else /* __ASSEMBLY__ */

__asm__(
        "       .macro  _ssnop                                  \n\t"
        "       sll     $0, $0, 1                               \n\t"
        "       .endm                                           \n\t"
        "                                                       \n\t"
        "       .macro  _ehb                                    \n\t"
        "       sll     $0, $0, 3                               \n\t"
        "       .endm                                           \n\t");

#ifdef CONFIG_CPU_RM9000

/*
 * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */

#define mtc0_tlbw_hazard()                                              \
        __asm__ __volatile__(                                           \
                ".set\tmips32\n\t"                                      \
                "_ssnop; _ssnop; _ssnop; _ssnop\n\t"                    \
                ".set\tmips0")

#define tlbw_use_hazard()                                               \
        __asm__ __volatile__(                                           \
                ".set\tmips32\n\t"                                      \
                "_ssnop; _ssnop; _ssnop; _ssnop\n\t"                    \
                ".set\tmips0")

#define back_to_back_c0_hazard()        do { } while (0)

#else

/*
 * Overkill warning ...
 */
#define mtc0_tlbw_hazard()                                              \
        __asm__ __volatile__(                                           \
                ".set noreorder\n\t"                                    \
                "nop; nop; nop; nop; nop; nop;\n\t"                     \
                ".set reorder\n\t")

#define tlbw_use_hazard()                                               \
        __asm__ __volatile__(                                           \
                ".set noreorder\n\t"                                    \
                "nop; nop; nop; nop; nop; nop;\n\t"                     \
                ".set reorder\n\t")

#endif
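
/*
 * Illustrative use only (not part of this header): C code that rewrites a
 * TLB entry typically brackets the operation with these barriers, along
 * the lines of what arch/mips/mm/tlb-r4k.c does:
 *
 *      write_c0_entryhi(entryhi);
 *      mtc0_tlbw_hazard();
 *      tlb_write_indexed();
 *      tlbw_use_hazard();
 */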

/*
 * mtc0->mfc0 hazard
 * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
 * It is a MIPS32R2 processor so ehb will clear the hazard.
 */

#ifdef CONFIG_CPU_MIPSR2
/*
 * Use a macro for ehb unless explicit support for MIPSR2 is enabled
 */
__asm__(
        "       .macro\tirq_enable_hazard                       \n\t"
        "       _ehb                                            \n\t"
        "       .endm                                           \n\t"
        "                                                       \n\t"
        "       .macro\tirq_disable_hazard                      \n\t"
        "       _ehb                                            \n\t"
        "       .endm");

#define irq_enable_hazard()                                             \
        __asm__ __volatile__(                                           \
        "_ehb\t\t\t\t# irq_enable_hazard")

#define irq_disable_hazard()                                            \
        __asm__ __volatile__(                                           \
        "_ehb\t\t\t\t# irq_disable_hazard")

#define back_to_back_c0_hazard()                                        \
        __asm__ __volatile__(                                           \
        "_ehb\t\t\t\t# back_to_back_c0_hazard")

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
 */

__asm__(
        "       .macro\tirq_enable_hazard                       \n\t"
        "       .endm                                           \n\t"
        "                                                       \n\t"
        "       .macro\tirq_disable_hazard                      \n\t"
        "       .endm");

#define irq_enable_hazard()     do { } while (0)
#define irq_disable_hazard()    do { } while (0)

#define back_to_back_c0_hazard()        do { } while (0)

#else

/*
 * Default for classic MIPS processors.  Assume worst case hazards but don't
 * care about the irq_enable_hazard - sooner or later the hardware will
 * enable interrupts and we don't care exactly when.
 */

__asm__(
        "       #                                               \n\t"
        "       # There is a hazard but we do not care          \n\t"
        "       #                                               \n\t"
        "       .macro\tirq_enable_hazard                       \n\t"
        "       .endm                                           \n\t"
        "                                                       \n\t"
        "       .macro\tirq_disable_hazard                      \n\t"
        "       _ssnop; _ssnop; _ssnop                          \n\t"
        "       .endm");

#define irq_enable_hazard()     do { } while (0)
#define irq_disable_hazard()                                            \
        __asm__ __volatile__(                                           \
        "_ssnop; _ssnop; _ssnop;\t\t# irq_disable_hazard")

#define back_to_back_c0_hazard()                                        \
        __asm__ __volatile__(                                           \
        "       .set noreorder                          \n"             \
        "       nop; nop; nop                           \n"             \
        "       .set reorder                            \n")

#endif
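
/*
 * Illustrative use only (not part of this header): back_to_back_c0_hazard()
 * is meant for a CP0 write immediately followed by a dependent CP0 access,
 * for example (hypothetical caller):
 *
 *      write_c0_compare(cnt);
 *      back_to_back_c0_hazard();
 *      cnt = read_c0_count();
 */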

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */