oleaut32: Added implementation of ITypeInfo2_fnGetTypeAttr.
[wine] / libs / port / interlocked.c
1 /*
2  * interlocked functions
3  *
4  * Copyright 1996 Alexandre Julliard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
19  */
20
21 #include "config.h"
22 #include "wine/port.h"
23 #include <assert.h>
24
25 #ifdef __i386__
26
27 #if defined(_MSC_VER)
28
/* Atomic compare-exchange: if (*dest == compare) *dest = xchg;
 * always returns the previous value of *dest.
 * Naked function: no prologue is generated, so the arguments are read
 * directly off the stack (4[esp]=dest, 8[esp]=xchg, 12[esp]=compare). */
__declspec(naked) int interlocked_cmpxchg( int *dest, int xchg, int compare )
{
    __asm mov eax, 12[esp];        /* eax = compare (cmpxchg compares *dest against eax) */
    __asm mov ecx, 8[esp];         /* ecx = xchg */
    __asm mov edx, 4[esp];         /* edx = dest */
    __asm lock cmpxchg [edx], ecx; /* on match *dest = ecx; old *dest -> eax */
    __asm ret;                     /* return value is already in eax */
}

/* Pointer-sized variant of interlocked_cmpxchg (identical code on 32-bit x86,
 * where pointers are 32 bits wide). */
__declspec(naked) void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
{
    __asm mov eax, 12[esp];
    __asm mov ecx, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock cmpxchg [edx], ecx;
    __asm ret;
}

/* 64-bit compare-exchange via cmpxchg8b: the compare value goes in edx:eax,
 * the replacement in ecx:ebx, and the previous contents of *dest come back
 * in edx:eax (the 64-bit return convention).  ebx and esi are callee-saved,
 * hence the push/pop pair; after the two pushes the first argument is found
 * at 12[esp]. */
__declspec(naked) __int64 interlocked_cmpxchg64( __int64 *dest, __int64 xchg, __int64 compare)
{
    __asm push ebx;
    __asm push esi;
    __asm mov esi, 12[esp];     /* esi = dest */
    __asm mov ebx, 16[esp];     /* ebx = low half of xchg */
    __asm mov ecx, 20[esp];     /* ecx = high half of xchg */
    __asm mov eax, 24[esp];     /* eax = low half of compare */
    __asm mov edx, 28[esp];     /* edx = high half of compare */
    __asm lock cmpxchg8b [esi]; /* old value left in edx:eax */
    __asm pop esi;
    __asm pop ebx;
    __asm ret;
}

/* Atomically store val into *dest and return the previous value.
 * (xchg with a memory operand is implicitly locked; the explicit lock
 * prefix is redundant but harmless.) */
__declspec(naked) int interlocked_xchg( int *dest, int val )
{
    __asm mov eax, 8[esp];      /* eax = val */
    __asm mov edx, 4[esp];      /* edx = dest */
    __asm lock xchg [edx], eax; /* swap eax with *dest; old value -> eax */
    __asm ret;
}

/* Pointer-sized variant of interlocked_xchg. */
__declspec(naked) void *interlocked_xchg_ptr( void **dest, void *val )
{
    __asm mov eax, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock xchg [edx], eax;
    __asm ret;
}

/* Atomically add incr to *dest and return the value *dest held before the
 * addition (xadd leaves the old value in eax). */
__declspec(naked) int interlocked_xchg_add( int *dest, int incr )
{
    __asm mov eax, 8[esp];      /* eax = incr */
    __asm mov edx, 4[esp];      /* edx = dest */
    __asm lock xadd [edx], eax; /* *dest += eax; old *dest -> eax */
    __asm ret;
}
85
86 #else
87 /* use gcc compatible asm code as default for __i386__ */
88
/* Atomic compare-exchange: if (*dest == compare) *dest = xchg;
 * returns the previous value of *dest (cmpxchg leaves it in %eax).
 * cdecl stack layout: 4(%esp)=dest, 8(%esp)=xchg, 12(%esp)=compare. */
__ASM_GLOBAL_FUNC(interlocked_cmpxchg,
                  "movl 12(%esp),%eax\n\t"          /* eax = compare */
                  "movl 8(%esp),%ecx\n\t"           /* ecx = xchg */
                  "movl 4(%esp),%edx\n\t"           /* edx = dest */
                  "lock; cmpxchgl %ecx,(%edx)\n\t"  /* on match *dest = ecx; old -> eax */
                  "ret")
/* Pointer-sized variant (identical code on 32-bit x86). */
__ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
                  "movl 12(%esp),%eax\n\t"
                  "movl 8(%esp),%ecx\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; cmpxchgl %ecx,(%edx)\n\t"
                  "ret")
/* 64-bit compare-exchange via cmpxchg8b: compare in %edx:%eax, replacement
 * in %ecx:%ebx, old value returned in %edx:%eax.  %ebx/%esi are callee-saved,
 * hence the push/pop and CFI annotations; after the two pushes the first
 * argument sits at 12(%esp). */
__ASM_GLOBAL_FUNC(interlocked_cmpxchg64,
                   "push %ebx\n\t"
                   __ASM_CFI(".cfi_adjust_cfa_offset 4\n\t")
                   __ASM_CFI(".cfi_rel_offset %ebx,0\n\t")
                   "push %esi\n\t"
                   __ASM_CFI(".cfi_adjust_cfa_offset 4\n\t")
                   __ASM_CFI(".cfi_rel_offset %esi,0\n\t")
                   "movl 12(%esp),%esi\n\t"         /* esi = dest */
                   "movl 16(%esp),%ebx\n\t"         /* ebx = low half of xchg */
                   "movl 20(%esp),%ecx\n\t"         /* ecx = high half of xchg */
                   "movl 24(%esp),%eax\n\t"         /* eax = low half of compare */
                   "movl 28(%esp),%edx\n\t"         /* edx = high half of compare */
                   "lock; cmpxchg8b (%esi)\n\t"     /* old value left in edx:eax */
                   "pop %esi\n\t"
                   __ASM_CFI(".cfi_same_value %esi\n\t")
                   __ASM_CFI(".cfi_adjust_cfa_offset -4\n\t")
                   "pop %ebx\n\t"
                   __ASM_CFI(".cfi_same_value %ebx\n\t")
                   __ASM_CFI(".cfi_adjust_cfa_offset -4\n\t")
                   "ret")
/* Atomically store the second argument into *dest, return the old value. */
__ASM_GLOBAL_FUNC(interlocked_xchg,
                  "movl 8(%esp),%eax\n\t"           /* eax = val */
                  "movl 4(%esp),%edx\n\t"           /* edx = dest */
                  "lock; xchgl %eax,(%edx)\n\t"     /* swap; old *dest -> eax */
                  "ret")
/* Pointer-sized variant of interlocked_xchg. */
__ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xchgl %eax,(%edx)\n\t"
                  "ret")
/* Atomically add the second argument to *dest, return the pre-add value
 * (xadd leaves it in %eax). */
__ASM_GLOBAL_FUNC(interlocked_xchg_add,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xaddl %eax,(%edx)\n\t"
                  "ret")
136
137 #endif
138
139 #elif defined(__x86_64__)
140
/* x86-64 System V calling convention: dest arrives in %rdi, the second
 * argument in %rsi/%esi, the third in %rdx/%edx; the result is returned
 * in %rax/%eax. */

/* Atomic compare-exchange of an int: if (*dest == compare) *dest = xchg;
 * returns the previous value of *dest. */
__ASM_GLOBAL_FUNC(interlocked_cmpxchg,
                  "mov %edx, %eax\n\t"              /* eax = compare */
                  "lock cmpxchgl %esi,(%rdi)\n\t"   /* on match *dest = esi; old -> eax */
                  "ret")
/* Pointer-sized (64-bit) variant. */
__ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
                  "mov %rdx, %rax\n\t"
                  "lock cmpxchgq %rsi,(%rdi)\n\t"
                  "ret")
/* 64-bit variant; identical to the pointer version on this ABI. */
__ASM_GLOBAL_FUNC(interlocked_cmpxchg64,
                  "mov %rdx, %rax\n\t"
                  "lock cmpxchgq %rsi,(%rdi)\n\t"
                  "ret")
/* Atomically store the second argument in *dest, return the old value. */
__ASM_GLOBAL_FUNC(interlocked_xchg,
                  "mov %esi, %eax\n\t"
                  "lock xchgl %eax, (%rdi)\n\t"
                  "ret")
/* Pointer-sized variant of interlocked_xchg. */
__ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
                  "mov %rsi, %rax\n\t"
                  "lock xchgq %rax,(%rdi)\n\t"
                  "ret")
/* Atomically add the second argument to *dest, return the pre-add value. */
__ASM_GLOBAL_FUNC(interlocked_xchg_add,
                  "mov %esi, %eax\n\t"
                  "lock xaddl %eax, (%rdi)\n\t"
                  "ret")
165 __ASM_GLOBAL_FUNC(interlocked_cmpxchg128,
166                   "push %rbx\n\t"
167                    ".cfi_adjust_cfa_offset 8\n\t"
168                    ".cfi_rel_offset %rbx,0\n\t"
169                   "mov %rcx,%r8\n\t"  /* compare */
170                   "mov %rdx,%rbx\n\t" /* xchg_low */
171                   "mov %rsi,%rcx\n\t" /* xchg_high */
172                   "mov 0(%r8),%rax\n\t"
173                   "mov 8(%r8),%rdx\n\t"
174                   "lock cmpxchg16b (%rdi)\n\t"
175                   "mov %rax,0(%r8)\n\t"
176                   "mov %rdx,8(%r8)\n\t"
177                   "setz %al\n\t"
178                   "pop %rbx\n\t"
179                    ".cfi_adjust_cfa_offset -8\n\t"
180                    ".cfi_same_value %rbx\n\t"
181                   "ret")
182
183 #elif defined(__powerpc__)
/* Atomic compare-exchange of a pointer-sized value: if (*dest == compare)
 * *dest = xchg; returns the previous value of *dest.  Uses the PowerPC
 * lwarx/stwcx. reservation loop, with an isync after the successful store
 * (the conventional acquire fence on this architecture). */
void* interlocked_cmpxchg_ptr( void **dest, void* xchg, void* compare)
{
    void *ret = 0;
    void *scratch;  /* holds the xor result used only for the comparison */
    __asm__ __volatile__(
        "0:    lwarx %0,0,%2\n"    /* load *dest and take a reservation */
        "      xor. %1,%4,%0\n"    /* equal to 'compare'? (sets cr0) */
        "      bne 1f\n"           /* no: return the current value */
        "      stwcx. %3,0,%2\n"   /* try to store xchg */
        "      bne- 0b\n"          /* reservation lost: retry */
        "      isync\n"
        "1:    "
        : "=&r"(ret), "=&r"(scratch)
        : "r"(dest), "r"(xchg), "r"(compare)
        : "cr0","memory");
    return ret;
}
201
202 __int64 interlocked_cmpxchg64( __int64 *dest, __int64 xchg, __int64 compare)
203 {
204     /* FIXME: add code */
205     assert(0);
206 }
207
/* Atomic compare-exchange of an int: same lwarx/stwcx. reservation loop
 * as the pointer version above; returns the previous value of *dest. */
int interlocked_cmpxchg( int *dest, int xchg, int compare)
{
    int ret = 0;
    int scratch;  /* holds the xor result used only for the comparison */
    __asm__ __volatile__(
        "0:    lwarx %0,0,%2\n"    /* load *dest and take a reservation */
        "      xor. %1,%4,%0\n"    /* equal to 'compare'? (sets cr0) */
        "      bne 1f\n"           /* no: return the current value */
        "      stwcx. %3,0,%2\n"   /* try to store xchg */
        "      bne- 0b\n"          /* reservation lost: retry */
        "      isync\n"
        "1:    "
        : "=&r"(ret), "=&r"(scratch)
        : "r"(dest), "r"(xchg), "r"(compare)
        : "cr0","memory","r0");
    return ret;
}

/* Atomically add incr to *dest.  The loop leaves the post-add value in ret,
 * so the conventional pre-add return value is recovered by subtracting incr
 * at the end. */
int interlocked_xchg_add( int *dest, int incr )
{
    int ret = 0;
    int zero = 0;  /* zero index register for the lwarx/stwcx. addressing */
    __asm__ __volatile__(
        "0:    lwarx %0, %3, %1\n"   /* load *dest and take a reservation */
        "      add %0, %2, %0\n"     /* add incr */
        "      stwcx. %0, %3, %1\n"  /* try to store the sum */
        "      bne- 0b\n"            /* reservation lost: retry */
        "      isync\n"
        : "=&r" (ret)
        : "r"(dest), "r"(incr), "r"(zero)
        : "cr0", "memory", "r0"
    );
    return ret-incr;
}

/* Atomically store val in *dest and return the previous value. */
int interlocked_xchg( int* dest, int val )
{
    int ret = 0;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%1\n"    /* load *dest and take a reservation */
        "      stwcx. %2,0,%1\n"   /* try to store val */
        "      bne- 0b\n"          /* reservation lost: retry */
        "      isync\n"
        : "=&r"(ret)
        : "r"(dest), "r"(val)
        : "cr0","memory","r0");
    return ret;
}

/* Pointer-sized variant of interlocked_xchg. */
void* interlocked_xchg_ptr( void** dest, void* val )
{
    void *ret = NULL;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%1\n"
        "      stwcx. %2,0,%1\n"
        "      bne- 0b\n"
        "      isync\n"
        : "=&r"(ret)
        : "r"(dest), "r"(val)
        : "cr0","memory","r0");
    return ret;
}
270
271 #elif defined(__ALPHA__) && defined(__GNUC__)
272
/* Alpha calling convention: arguments arrive in $16-$18, result in $0.
 * ldl_l/stl_c (and the quadword ldq_l/stq_c) form the load-locked /
 * store-conditional retry loop; 'mb' is a memory barrier.
 * NOTE(review): no explicit return instruction appears in these bodies —
 * presumably __ASM_GLOBAL_FUNC supplies the epilogue; confirm against its
 * definition. */

/* Atomic compare-exchange of an int: if (*dest == compare) *dest = xchg;
 * returns the previous value of *dest. */
__ASM_GLOBAL_FUNC(interlocked_cmpxchg,
                  "L0cmpxchg:\n\t"
                  "ldl_l $0,0($16)\n\t"      /* load *dest, set lock flag */
                  "cmpeq $0,$18,$1\n\t"      /* equal to compare? */
                  "beq   $1,L1cmpxchg\n\t"   /* no: return the current value */
                  "mov   $17,$0\n\t"
                  "stl_c $0,0($16)\n\t"      /* try to store xchg */
                  "beq   $0,L0cmpxchg\n\t"   /* store-conditional failed: retry */
                  "mov   $18,$0\n"           /* success: old value equals compare */
                  "L1cmpxchg:\n\t"
                  "mb")

/* Pointer-sized (quadword) variant: ldq_l/stq_c instead of ldl_l/stl_c. */
__ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
                  "L0cmpxchg_ptr:\n\t"
                  "ldq_l $0,0($16)\n\t"
                  "cmpeq $0,$18,$1\n\t"
                  "beq   $1,L1cmpxchg_ptr\n\t"
                  "mov   $17,$0\n\t"
                  "stq_c $0,0($16)\n\t"
                  "beq   $0,L0cmpxchg_ptr\n\t"
                  "mov   $18,$0\n"
                  "L1cmpxchg_ptr:\n\t"
                  "mb")
296
297 __int64 interlocked_cmpxchg64(__int64 *dest, __int64 xchg, __int64 compare)
298 {
299     /* FIXME: add code */
300     assert(0);
301 }
302
/* Atomically store the second argument ($17) in *dest ($16) and return the
 * previous value in $0, retrying while the store-conditional fails. */
__ASM_GLOBAL_FUNC(interlocked_xchg,
                  "L0xchg:\n\t"
                  "ldl_l $0,0($16)\n\t"    /* load *dest, set lock flag */
                  "mov   $17,$1\n\t"
                  "stl_c $1,0($16)\n\t"    /* try to store val */
                  "beq   $1,L0xchg\n\t"    /* store-conditional failed: retry */
                  "mb")

/* Pointer-sized (quadword) variant of interlocked_xchg. */
__ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
                  "L0xchg_ptr:\n\t"
                  "ldq_l $0,0($16)\n\t"
                  "mov   $17,$1\n\t"
                  "stq_c $1,0($16)\n\t"
                  "beq   $1,L0xchg_ptr\n\t"
                  "mb")

/* Atomically add $17 to *dest ($16); returns the pre-add value in $0. */
__ASM_GLOBAL_FUNC(interlocked_xchg_add,
                  "L0xchg_add:\n\t"
                  "ldl_l $0,0($16)\n\t"    /* load *dest, set lock flag */
                  "addl  $0,$17,$1\n\t"    /* $1 = *dest + incr */
                  "stl_c $1,0($16)\n\t"    /* try to store the sum */
                  "beq   $1,L0xchg_add\n\t"
                  "mb")
326
327
328 #else
329
330 #include <pthread.h>
331
332 static pthread_mutex_t interlocked_mutex = PTHREAD_MUTEX_INITIALIZER;
333
334 int interlocked_cmpxchg( int *dest, int xchg, int compare )
335 {
336     pthread_mutex_lock( &interlocked_mutex );
337
338     if (*dest == compare)
339         *dest = xchg;
340     else
341         compare = *dest;
342
343     pthread_mutex_unlock( &interlocked_mutex );
344     return compare;
345 }
346
347 void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
348 {
349     pthread_mutex_lock( &interlocked_mutex );
350
351     if (*dest == compare)
352         *dest = xchg;
353     else
354         compare = *dest;
355
356     pthread_mutex_unlock( &interlocked_mutex );
357     return compare;
358 }
359
360 __int64 interlocked_cmpxchg64( __int64 *dest, __int64 xchg, __int64 compare )
361 {
362     pthread_mutex_lock( &interlocked_mutex );
363
364     if (*dest == compare)
365         *dest = xchg;
366     else
367         compare = *dest;
368
369     pthread_mutex_unlock( &interlocked_mutex );
370     return compare;
371 }
372
373 int interlocked_xchg( int *dest, int val )
374 {
375     int retv;
376     pthread_mutex_lock( &interlocked_mutex );
377     retv = *dest;
378     *dest = val;
379     pthread_mutex_unlock( &interlocked_mutex );
380     return retv;
381 }
382
383 void *interlocked_xchg_ptr( void **dest, void *val )
384 {
385     void *retv;
386     pthread_mutex_lock( &interlocked_mutex );
387     retv = *dest;
388     *dest = val;
389     pthread_mutex_unlock( &interlocked_mutex );
390     return retv;
391 }
392
393 int interlocked_xchg_add( int *dest, int incr )
394 {
395     int retv;
396     pthread_mutex_lock( &interlocked_mutex );
397     retv = *dest;
398     *dest += incr;
399     pthread_mutex_unlock( &interlocked_mutex );
400     return retv;
401 }
402
403 #endif