#ifndef _ASM_IA64_INTRINSICS_H
#define _ASM_IA64_INTRINSICS_H

/*
 * Compiler-dependent intrinsics.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#ifndef __ASSEMBLY__

#include <linux/types.h>
/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
# include <asm/intel_intrin.h>
#else
# include <asm/gcc_intrin.h>
#endif

#define ia64_native_get_psr_i()	(ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I)

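/*
 * The literal addresses below are the base addresses of virtual
 * regions 0-4: the region number lives in address bits 63:61, so
 * region n starts at (n << 61).
 */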
#define ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4)	\
do {									\
	ia64_native_set_rr(0x0000000000000000UL, (val0));		\
	ia64_native_set_rr(0x2000000000000000UL, (val1));		\
	ia64_native_set_rr(0x4000000000000000UL, (val2));		\
	ia64_native_set_rr(0x6000000000000000UL, (val3));		\
	ia64_native_set_rr(0x8000000000000000UL, (val4));		\
} while (0)

/*
 * Force an unresolved reference if someone tries to use
 * ia64_fetch_and_add() with a bad value.
 */
extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);

#define IA64_FETCHADD(tmp,v,n,sz,sem)						\
({										\
	switch (sz) {								\
	      case 4:								\
		tmp = ia64_fetchadd4_##sem((unsigned int *) v, n);		\
		break;								\
										\
	      case 8:								\
		tmp = ia64_fetchadd8_##sem((unsigned long *) v, n);		\
		break;								\
										\
	      default:								\
		__bad_size_for_ia64_fetch_and_add();				\
	}									\
})

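/*
 * Note: only the increments +/-1, +/-4, +/-8 and +/-16 are accepted
 * below, because those are the only immediates the fetchadd
 * instruction can encode.
 */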
#define ia64_fetchadd(i,v,sem)							\
({										\
	__u64 _tmp;								\
	volatile __typeof__(*(v)) *_v = (v);					\
	/* Can't use a switch () here: gcc isn't always smart enough for that... */ \
	if ((i) == -16)								\
		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);		\
	else if ((i) == -8)							\
		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);			\
	else if ((i) == -4)							\
		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);			\
	else if ((i) == -1)							\
		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);			\
	else if ((i) == 1)							\
		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);			\
	else if ((i) == 4)							\
		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);			\
	else if ((i) == 8)							\
		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);			\
	else if ((i) == 16)							\
		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);			\
	else									\
		_tmp = __bad_increment_for_ia64_fetch_and_add();		\
	(__typeof__(*(v))) (_tmp);	/* return old value */			\
})

#define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, rel) + (i)) /* return new value */

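/*
 * Usage sketch (illustrative only; the function name is hypothetical,
 * not part of this API): ia64_fetchadd() returns the value the word
 * held *before* the add, while ia64_fetch_and_add() folds the
 * increment back in and returns the *new* value.
 */
static inline __u64 example_count_event (volatile __u64 *ctr)
{
	__u64 old = ia64_fetchadd(1, ctr, rel);	/* old value, release semantics */
	return old + 1;		/* same result as ia64_fetch_and_add(1, ctr) */
}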

/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalid xchg().
 */
extern void ia64_xchg_called_with_bad_pointer (void);

#define __xchg(x,ptr,size)						\
({									\
	unsigned long __xchg_result;					\
									\
	switch (size) {							\
	      case 1:							\
		__xchg_result = ia64_xchg1((__u8 *)ptr, x);		\
		break;							\
									\
	      case 2:							\
		__xchg_result = ia64_xchg2((__u16 *)ptr, x);		\
		break;							\
									\
	      case 4:							\
		__xchg_result = ia64_xchg4((__u32 *)ptr, x);		\
		break;							\
									\
	      case 8:							\
		__xchg_result = ia64_xchg8((__u64 *)ptr, x);		\
		break;							\
	      default:							\
		ia64_xchg_called_with_bad_pointer();			\
	}								\
	__xchg_result;							\
})

#define xchg(ptr,x)							\
  ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))

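/*
 * Usage sketch (illustrative only; the name is hypothetical): a crude
 * test-and-set trylock built on xchg().  xchg() returns the previous
 * contents of the word, so reading back 0 means this caller installed
 * the 1 and now owns the lock.
 */
static inline int example_xchg_trylock (volatile __u32 *lock)
{
	return xchg(lock, 1) == 0;	/* non-zero on success */
}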

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern long ia64_cmpxchg_called_with_bad_pointer (void);

#define ia64_cmpxchg(sem,ptr,old,new,size)					\
({										\
	__u64 _o_, _r_;								\
										\
	switch (size) {								\
	      case 1: _o_ = (__u8 ) (long) (old); break;			\
	      case 2: _o_ = (__u16) (long) (old); break;			\
	      case 4: _o_ = (__u32) (long) (old); break;			\
	      case 8: _o_ = (__u64) (long) (old); break;			\
	      default: break;							\
	}									\
	switch (size) {								\
	      case 1:								\
		_r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_);		\
		break;								\
										\
	      case 2:								\
		_r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_);		\
		break;								\
										\
	      case 4:								\
		_r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_);		\
		break;								\
										\
	      case 8:								\
		_r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_);		\
		break;								\
										\
	      default:								\
		_r_ = ia64_cmpxchg_called_with_bad_pointer();			\
		break;								\
	}									\
	(__typeof__(old)) _r_;							\
})

#define cmpxchg_acq(ptr, o, n)	\
	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr, o, n)	\
	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))

/* for compatibility with other platforms: */
#define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
#define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))

#define cmpxchg_local		cmpxchg
#define cmpxchg64_local		cmpxchg64
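
/*
 * Usage sketch (illustrative only; the name is hypothetical): the
 * canonical compare-and-exchange retry loop.  As described above,
 * success is detected by comparing the value cmpxchg returns with the
 * 'old' value that was passed in.
 */
static inline __u32 example_atomic_or (volatile __u32 *p, __u32 bits)
{
	__u32 old;

	do {
		old = *p;
	} while (cmpxchg_acq(p, old, old | bits) != old);
	return old | bits;	/* the value this CPU installed */
}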

#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
# define CMPXCHG_BUGCHECK(v)						\
do {									\
	if (_cmpxchg_bugcheck_count-- <= 0) {				\
		void *ip;						\
		extern int printk(const char *fmt, ...);		\
		ip = (void *) ia64_getreg(_IA64_REG_IP);		\
		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
		break;							\
	}								\
} while (0)
#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
# define CMPXCHG_BUGCHECK_DECL
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */

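/*
 * Usage sketch (illustrative; modeled on the ia64 atomic helpers): the
 * bugcheck hooks are meant to wrap a cmpxchg retry loop.  Each failed
 * iteration decrements the watchdog counter, and after 128 retries a
 * CONFIG_IA64_DEBUG_CMPXCHG kernel prints the IP and the word the loop
 * appears stuck on.
 */
static inline int example_add_return (int i, volatile int *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = *v;
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(*v)) != old);
	return new;
}
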
#endif

#ifdef __KERNEL__
#include <asm/paravirt_privop.h>
#endif

#ifndef __ASSEMBLY__
#if defined(CONFIG_PARAVIRT) && defined(__KERNEL__)
#ifdef ASM_SUPPORTED
# define IA64_INTRINSIC_API(name)	paravirt_ ## name
#else
# define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
#endif
#define IA64_INTRINSIC_MACRO(name)	paravirt_ ## name
#else
#define IA64_INTRINSIC_API(name)	ia64_native_ ## name
#define IA64_INTRINSIC_MACRO(name)	ia64_native_ ## name
#endif
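
/*
 * For example, in a CONFIG_PARAVIRT build with ASM_SUPPORTED the
 * "ia64_thash" definition below expands to paravirt_thash; a native
 * build gets ia64_native_thash instead.
 */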

/************************************************/
/* Instructions paravirtualized for correctness */
/************************************************/
/* fc, thash, get_cpuid, get_pmd, get_eflags, set_eflags */
/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
 * is not currently used (though it may be in a long-format VHPT system!)
 */
#define ia64_fc				IA64_INTRINSIC_API(fc)
#define ia64_thash			IA64_INTRINSIC_API(thash)
#define ia64_get_cpuid			IA64_INTRINSIC_API(get_cpuid)
#define ia64_get_pmd			IA64_INTRINSIC_API(get_pmd)


/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/
#define ia64_ssm			IA64_INTRINSIC_MACRO(ssm)
#define ia64_rsm			IA64_INTRINSIC_MACRO(rsm)
#define ia64_getreg			IA64_INTRINSIC_MACRO(getreg)
#define ia64_setreg			IA64_INTRINSIC_API(setreg)
#define ia64_set_rr			IA64_INTRINSIC_API(set_rr)
#define ia64_get_rr			IA64_INTRINSIC_API(get_rr)
#define ia64_ptcga			IA64_INTRINSIC_API(ptcga)
#define ia64_get_psr_i			IA64_INTRINSIC_API(get_psr_i)
#define ia64_intrin_local_irq_restore	\
	IA64_INTRINSIC_API(intrin_local_irq_restore)
#define ia64_set_rr0_to_rr4		IA64_INTRINSIC_API(set_rr0_to_rr4)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_INTRINSICS_H */