/*
 * interlocked functions
 *
 * Copyright 1996 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include "config.h"
#include "wine/port.h"

#include <assert.h>

#ifdef __i386__

#ifdef _MSC_VER

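/* MSVC i386 build: these are naked functions, so the arguments are read
 * straight off the stack (4/8/12[esp]) and the result is returned in eax
 * (edx:eax for the 64-bit compare-exchange). */
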
__declspec(naked) int interlocked_cmpxchg( int *dest, int xchg, int compare )
{
    __asm mov eax, 12[esp];        /* compare */
    __asm mov ecx, 8[esp];         /* xchg */
    __asm mov edx, 4[esp];         /* dest */
    __asm lock cmpxchg [edx], ecx; /* previous value is returned in eax */
    __asm ret;
}

__declspec(naked) void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
{
    __asm mov eax, 12[esp];
    __asm mov ecx, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock cmpxchg [edx], ecx;
    __asm ret;
}

__declspec(naked) __int64 interlocked_cmpxchg64( __int64 *dest, __int64 xchg, __int64 compare )
{
    __asm push ebx;
    __asm push esi;
    __asm mov esi, 12[esp];        /* dest */
    __asm mov ebx, 16[esp];        /* xchg low */
    __asm mov ecx, 20[esp];        /* xchg high */
    __asm mov eax, 24[esp];        /* compare low */
    __asm mov edx, 28[esp];        /* compare high */
    __asm lock cmpxchg8b [esi];    /* previous value is returned in edx:eax */
    __asm pop esi;
    __asm pop ebx;
    __asm ret;
}

__declspec(naked) int interlocked_xchg( int *dest, int val )
{
    __asm mov eax, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock xchg [edx], eax;
    __asm ret;
}

__declspec(naked) void *interlocked_xchg_ptr( void **dest, void *val )
{
    __asm mov eax, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock xchg [edx], eax;
    __asm ret;
}

__declspec(naked) int interlocked_xchg_add( int *dest, int incr )
{
    __asm mov eax, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock xadd [edx], eax;
    __asm ret;
}

#else
/* use GCC-compatible asm code as default for __i386__ */

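/* __ASM_GLOBAL_FUNC emits each body as a raw assembly function: every
 * block loads its own arguments from the stack and must end in an
 * explicit "ret". */
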
__ASM_GLOBAL_FUNC(interlocked_cmpxchg,
                  "movl 12(%esp),%eax\n\t"
                  "movl 8(%esp),%ecx\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; cmpxchgl %ecx,(%edx)\n\t"
                  "ret")
__ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
                  "movl 12(%esp),%eax\n\t"
                  "movl 8(%esp),%ecx\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; cmpxchgl %ecx,(%edx)\n\t"
                  "ret")
__ASM_GLOBAL_FUNC(interlocked_cmpxchg64,
                  "push %ebx\n\t"
                  __ASM_CFI(".cfi_adjust_cfa_offset 4\n\t")
                  __ASM_CFI(".cfi_rel_offset %ebx,0\n\t")
                  "push %esi\n\t"
                  __ASM_CFI(".cfi_adjust_cfa_offset 4\n\t")
                  __ASM_CFI(".cfi_rel_offset %esi,0\n\t")
                  "movl 12(%esp),%esi\n\t"
                  "movl 16(%esp),%ebx\n\t"
                  "movl 20(%esp),%ecx\n\t"
                  "movl 24(%esp),%eax\n\t"
                  "movl 28(%esp),%edx\n\t"
                  "lock; cmpxchg8b (%esi)\n\t"
                  "pop %esi\n\t"
                  __ASM_CFI(".cfi_same_value %esi\n\t")
                  __ASM_CFI(".cfi_adjust_cfa_offset -4\n\t")
                  "pop %ebx\n\t"
                  __ASM_CFI(".cfi_same_value %ebx\n\t")
                  __ASM_CFI(".cfi_adjust_cfa_offset -4\n\t")
                  "ret")
__ASM_GLOBAL_FUNC(interlocked_xchg,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xchgl %eax,(%edx)\n\t"
                  "ret")
__ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xchgl %eax,(%edx)\n\t"
                  "ret")
__ASM_GLOBAL_FUNC(interlocked_xchg_add,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xaddl %eax,(%edx)\n\t"
                  "ret")

#endif  /* _MSC_VER */

#elif defined(__x86_64__)

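/* On x86-64 the System V ABI passes the arguments in %rdi, %rsi and %rdx
 * (%rcx for a fourth) and returns the result in %rax, so each function
 * only needs to move the expected value into place before the locked
 * instruction. */
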
__ASM_GLOBAL_FUNC(interlocked_cmpxchg,
                  "mov %edx, %eax\n\t"
                  "lock cmpxchgl %esi,(%rdi)\n\t"
                  "ret")
__ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
                  "mov %rdx, %rax\n\t"
                  "lock cmpxchgq %rsi,(%rdi)\n\t"
                  "ret")
__ASM_GLOBAL_FUNC(interlocked_cmpxchg64,
                  "mov %rdx, %rax\n\t"
                  "lock cmpxchgq %rsi,(%rdi)\n\t"
                  "ret")
__ASM_GLOBAL_FUNC(interlocked_xchg,
                  "mov %esi, %eax\n\t"
                  "lock xchgl %eax, (%rdi)\n\t"
                  "ret")
__ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
                  "mov %rsi, %rax\n\t"
                  "lock xchgq %rax,(%rdi)\n\t"
                  "ret")
__ASM_GLOBAL_FUNC(interlocked_xchg_add,
                  "mov %esi, %eax\n\t"
                  "lock xaddl %eax, (%rdi)\n\t"
                  "ret")
__ASM_GLOBAL_FUNC(interlocked_cmpxchg128,
                  "push %rbx\n\t"
                  ".cfi_adjust_cfa_offset 8\n\t"
                  ".cfi_rel_offset %rbx,0\n\t"
                  "mov %rcx,%r8\n\t"  /* compare */
                  "mov %rdx,%rbx\n\t" /* xchg_low */
                  "mov %rsi,%rcx\n\t" /* xchg_high */
                  "mov 0(%r8),%rax\n\t"
                  "mov 8(%r8),%rdx\n\t"
                  "lock cmpxchg16b (%rdi)\n\t"
                  "mov %rax,0(%r8)\n\t"
                  "mov %rdx,8(%r8)\n\t"
                  "setz %al\n\t"
                  "pop %rbx\n\t"
                  ".cfi_adjust_cfa_offset -8\n\t"
                  ".cfi_same_value %rbx\n\t"
                  "ret")

#elif defined(__powerpc__)

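/* PowerPC has no single compare-exchange instruction; these use a
 * load-linked/store-conditional (lwarx/stwcx.) loop: the conditional
 * store only succeeds if nothing else touched the reservation, otherwise
 * the sequence branches back and retries. */
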
void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
{
    void *ret = 0;
    void *scratch;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%2\n"
        "      xor. %1,%4,%0\n"
        "      bne 1f\n"
        "      stwcx. %3,0,%2\n"
        "      bne- 0b\n"
        "      isync\n"
        "1:    "
        : "=&r"(ret), "=&r"(scratch)
        : "r"(dest), "r"(xchg), "r"(compare)
        : "cr0","memory","r0");
    return ret;
}

__int64 interlocked_cmpxchg64( __int64 *dest, __int64 xchg, __int64 compare )
{
    /* FIXME: add code */
    assert(0);
    return 0;
}

int interlocked_cmpxchg( int *dest, int xchg, int compare )
{
    int ret = 0;
    int scratch;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%2\n"
        "      xor. %1,%4,%0\n"
        "      bne 1f\n"
        "      stwcx. %3,0,%2\n"
        "      bne- 0b\n"
        "      isync\n"
        "1:    "
        : "=&r"(ret), "=&r"(scratch)
        : "r"(dest), "r"(xchg), "r"(compare)
        : "cr0","memory","r0");
    return ret;
}

int interlocked_xchg_add( int *dest, int incr )
{
    int ret = 0;
    int zero = 0;
    __asm__ __volatile__(
        "0:    lwarx %0, %3, %1\n"
        "      add %0, %2, %0\n"
        "      stwcx. %0, %3, %1\n"
        "      bne- 0b\n"
        "      isync\n"
        : "=&r" (ret)
        : "r"(dest), "r"(incr), "r"(zero)
        : "cr0", "memory", "r0"
    );
    return ret - incr;
}

int interlocked_xchg( int *dest, int val )
{
    int ret = 0;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%1\n"
        "      stwcx. %2,0,%1\n"
        "      bne- 0b\n"
        "      isync\n"
        : "=&r" (ret)
        : "r"(dest), "r"(val)
        : "cr0","memory","r0");
    return ret;
}

void *interlocked_xchg_ptr( void **dest, void *val )
{
    void *ret = NULL;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%1\n"
        "      stwcx. %2,0,%1\n"
        "      bne- 0b\n"
        "      isync\n"
        : "=&r" (ret)
        : "r"(dest), "r"(val)
        : "cr0","memory","r0");
    return ret;
}

#elif defined(__ALPHA__) && defined(__GNUC__)

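/* Alpha uses the same load-locked/store-conditional idea (ldl_l/stl_c,
 * or ldq_l/stq_c for pointer-sized values): a failed conditional store
 * branches back to the retry label, and "mb" is the memory barrier. */
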
__ASM_GLOBAL_FUNC(interlocked_cmpxchg,
                  "L0cmpxchg:\n\t"
                  "ldl_l $0,0($16)\n\t"
                  "cmpeq $0,$18,$1\n\t"
                  "beq $1,L1cmpxchg\n\t"
                  "mov $17,$0\n\t"
                  "stl_c $0,0($16)\n\t"
                  "beq $0,L0cmpxchg\n\t"
                  "mov $18,$0\n"
                  "L1cmpxchg:\n\t"
                  "mb")

__ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
                  "L0cmpxchg_ptr:\n\t"
                  "ldq_l $0,0($16)\n\t"
                  "cmpeq $0,$18,$1\n\t"
                  "beq $1,L1cmpxchg_ptr\n\t"
                  "mov $17,$0\n\t"
                  "stq_c $0,0($16)\n\t"
                  "beq $0,L0cmpxchg_ptr\n\t"
                  "mov $18,$0\n"
                  "L1cmpxchg_ptr:\n\t"
                  "mb")

__int64 interlocked_cmpxchg64( __int64 *dest, __int64 xchg, __int64 compare )
{
    /* FIXME: add code */
    assert(0);
    return 0;
}

__ASM_GLOBAL_FUNC(interlocked_xchg,
                  "L0xchg:\n\t"
                  "ldl_l $0,0($16)\n\t"
                  "mov $17,$1\n\t"
                  "stl_c $1,0($16)\n\t"
                  "beq $1,L0xchg\n\t"
                  "mb")

__ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
                  "L0xchg_ptr:\n\t"
                  "ldq_l $0,0($16)\n\t"
                  "mov $17,$1\n\t"
                  "stq_c $1,0($16)\n\t"
                  "beq $1,L0xchg_ptr\n\t"
                  "mb")

__ASM_GLOBAL_FUNC(interlocked_xchg_add,
                  "L0xchg_add:\n\t"
                  "ldl_l $0,0($16)\n\t"
                  "addl $0,$17,$1\n\t"
                  "stl_c $1,0($16)\n\t"
                  "beq $1,L0xchg_add\n\t"
                  "mb")

#else

#include <pthread.h>

static pthread_mutex_t interlocked_mutex = PTHREAD_MUTEX_INITIALIZER;

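/* This fallback serializes every operation on the single mutex above:
 * correct on any platform with pthreads, but much slower than the native
 * atomic sequences in the other branches. */
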
int interlocked_cmpxchg( int *dest, int xchg, int compare )
{
    pthread_mutex_lock( &interlocked_mutex );

    if (*dest == compare)
        *dest = xchg;
    else
        compare = *dest;

    pthread_mutex_unlock( &interlocked_mutex );
    return compare;
}

void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
{
    pthread_mutex_lock( &interlocked_mutex );

    if (*dest == compare)
        *dest = xchg;
    else
        compare = *dest;

    pthread_mutex_unlock( &interlocked_mutex );
    return compare;
}

__int64 interlocked_cmpxchg64( __int64 *dest, __int64 xchg, __int64 compare )
{
    pthread_mutex_lock( &interlocked_mutex );

    if (*dest == compare)
        *dest = xchg;
    else
        compare = *dest;

    pthread_mutex_unlock( &interlocked_mutex );
    return compare;
}

int interlocked_xchg( int *dest, int val )
{
    int retv;
    pthread_mutex_lock( &interlocked_mutex );
    retv = *dest;
    *dest = val;
    pthread_mutex_unlock( &interlocked_mutex );
    return retv;
}

void *interlocked_xchg_ptr( void **dest, void *val )
{
    void *retv;
    pthread_mutex_lock( &interlocked_mutex );
    retv = *dest;
    *dest = val;
    pthread_mutex_unlock( &interlocked_mutex );
    return retv;
}

int interlocked_xchg_add( int *dest, int incr )
{
    int retv;
    pthread_mutex_lock( &interlocked_mutex );
    retv = *dest;
    *dest += incr;
    pthread_mutex_unlock( &interlocked_mutex );
    return retv;
}

#endif
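
/* Usage sketch (illustrative only): each primitive returns the previous
 * value, so e.g. an atomic increment can be built on the compare-exchange:
 *
 *     int old;
 *     do old = *counter;
 *     while (interlocked_cmpxchg( counter, old + 1, old ) != old);
 */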