2 * interlocked functions
4 * Copyright 1996 Alexandre Julliard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include "wine/port.h"
28 __ASM_GLOBAL_FUNC(interlocked_cmpxchg,
29 "movl 12(%esp),%eax\n\t"
30 "movl 8(%esp),%ecx\n\t"
31 "movl 4(%esp),%edx\n\t"
32 "lock; cmpxchgl %ecx,(%edx)\n\t"
34 __ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
35 "movl 12(%esp),%eax\n\t"
36 "movl 8(%esp),%ecx\n\t"
37 "movl 4(%esp),%edx\n\t"
38 "lock; cmpxchgl %ecx,(%edx)\n\t"
40 __ASM_GLOBAL_FUNC(interlocked_xchg,
41 "movl 8(%esp),%eax\n\t"
42 "movl 4(%esp),%edx\n\t"
43 "lock; xchgl %eax,(%edx)\n\t"
45 __ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
46 "movl 8(%esp),%eax\n\t"
47 "movl 4(%esp),%edx\n\t"
48 "lock; xchgl %eax,(%edx)\n\t"
50 __ASM_GLOBAL_FUNC(interlocked_xchg_add,
51 "movl 8(%esp),%eax\n\t"
52 "movl 4(%esp),%edx\n\t"
53 "lock; xaddl %eax,(%edx)\n\t"
56 #elif defined(_MSC_VER)
58 __declspec(naked) long interlocked_cmpxchg( long *dest, long xchg, long compare )
60 __asm mov eax, 12[esp];
61 __asm mov ecx, 8[esp];
62 __asm mov edx, 4[esp];
63 __asm lock cmpxchg [edx], ecx;
67 __declspec(naked) void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
69 __asm mov eax, 12[esp];
70 __asm mov ecx, 8[esp];
71 __asm mov edx, 4[esp];
72 __asm lock cmpxchg [edx], ecx;
76 __declspec(naked) long interlocked_xchg( long *dest, long val )
78 __asm mov eax, 8[esp];
79 __asm mov edx, 4[esp];
80 __asm lock xchg [edx], eax;
84 __declspec(naked) void *interlocked_xchg_ptr( void **dest, void *val )
86 __asm mov eax, 8[esp];
87 __asm mov edx, 4[esp];
88 __asm lock xchg [edx], eax;
92 __declspec(naked) long interlocked_xchg_add( long *dest, long incr )
94 __asm mov eax, 8[esp];
95 __asm mov edx, 4[esp];
96 __asm lock xadd [edx], eax;
101 # error You must implement the interlocked* functions for your compiler
104 #elif defined(__powerpc__)
/* Pointer-sized atomic compare-and-swap for PowerPC, built on the
 * lwarx/stwcx. load-reserve/store-conditional pair.
 * NOTE(review): asm template reconstructed around the surviving
 * constraint lists — verify against the PPC ISA before shipping. */
void* interlocked_cmpxchg_ptr( void **dest, void* xchg, void* compare)
{
    void *ret = 0;
    void *scratch;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%2\n"   /* ret = *dest, with reservation */
        "      xor. %1,%4,%0\n"   /* compare against 'compare' */
        "      bne 1f\n"          /* mismatch: leave *dest alone */
        "      stwcx. %3,0,%2\n"  /* try to store xchg */
        "      bne- 0b\n"         /* reservation lost: retry */
        "      isync\n"           /* order subsequent loads */
        "1:    "
        : "=&r"(ret), "=&r"(scratch)
        : "r"(dest), "r"(xchg), "r"(compare)
        : "cr0","memory","r0");
    return ret;  /* previous value of *dest */
}
/* Atomic compare-and-swap for PowerPC: if (*dest == compare) store xchg;
 * returns the previous value of *dest.
 * NOTE(review): asm template reconstructed around the surviving
 * constraint/clobber lines — verify against the PPC ISA before shipping. */
long interlocked_cmpxchg( long *dest, long xchg, long compare)
{
    long ret = 0;
    long scratch;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%2\n"   /* ret = *dest, with reservation */
        "      xor. %1,%4,%0\n"   /* compare against 'compare' */
        "      bne 1f\n"          /* mismatch: leave *dest alone */
        "      stwcx. %3,0,%2\n"  /* try to store xchg */
        "      bne- 0b\n"         /* reservation lost: retry */
        "      isync\n"           /* order subsequent loads */
        "1:    "
        : "=&r"(ret), "=&r"(scratch)
        : "r"(dest), "r"(xchg), "r"(compare)
        : "cr0","memory","r0");
    return ret;  /* previous value of *dest */
}
/* Atomic fetch-and-add for PowerPC; returns the value *dest had before
 * the addition (the loop computes the new value, so old = new - incr).
 * NOTE(review): interior asm lines reconstructed around the surviving
 * lwarx/stwcx. lines — verify before shipping. */
long interlocked_xchg_add( long *dest, long incr )
{
    long ret = 0;
    long zero = 0;  /* used as the zero index register for lwarx/stwcx. */
    __asm__ __volatile__(
        "0:    lwarx %0, %3, %1\n"  /* ret = *dest, with reservation */
        "      add %0, %2, %0\n"    /* ret += incr */
        "      stwcx. %0, %3, %1\n" /* try to store the new value */
        "      bne- 0b\n"           /* reservation lost: retry */
        "      isync\n"             /* order subsequent loads */
        : "=&r" (ret)
        : "r"(dest), "r"(incr), "r"(zero)
        : "cr0", "memory", "r0"
    );
    return ret - incr;  /* ret holds the new value; recover the old one */
}
/* Atomic exchange for PowerPC; stores val into *dest and returns the
 * previous value.
 * NOTE(review): asm template reconstructed around the surviving
 * constraint/clobber lines — verify before shipping. */
long interlocked_xchg( long* dest, long val )
{
    long ret = 0;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%1\n"   /* ret = *dest, with reservation */
        "      stwcx. %2,0,%1\n"  /* try to store val */
        "      bne- 0b\n"         /* reservation lost: retry */
        "      isync\n"           /* order subsequent loads */
        : "=&r"(ret)
        : "r"(dest), "r"(val)
        : "cr0","memory","r0");
    return ret;  /* previous value of *dest */
}
168 void* interlocked_xchg_ptr( void** dest, void* val )
171 __asm__ __volatile__(
176 : "r"(dest), "r"(val)
177 : "cr0","memory","r0");
181 #elif defined(__sparc__) && defined(__sun__)
184 * As the earlier Sparc processors lack necessary atomic instructions,
185 * I'm simply falling back to the library-provided _lwp_mutex routines
186 * to ensure mutual exclusion in a way appropriate for the current
189 * FIXME: If we have the compare-and-swap instruction (Sparc v9 and above)
190 * we could use this to speed up the Interlocked operations ...
/* Process-wide LWP mutex serializing every emulated interlocked operation
 * on SPARC (pre-v9 parts lack a compare-and-swap instruction). */
static lwp_mutex_t interlocked_mutex = DEFAULTMUTEX;
195 long interlocked_cmpxchg( long *dest, long xchg, long compare )
197 _lwp_mutex_lock( &interlocked_mutex );
198 if (*dest == compare) *dest = xchg;
199 else compare = *dest;
200 _lwp_mutex_unlock( &interlocked_mutex );
204 void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
206 _lwp_mutex_lock( &interlocked_mutex );
207 if (*dest == compare) *dest = xchg;
208 else compare = *dest;
209 _lwp_mutex_unlock( &interlocked_mutex );
213 long interlocked_xchg( long *dest, long val )
216 _lwp_mutex_lock( &interlocked_mutex );
219 _lwp_mutex_unlock( &interlocked_mutex );
223 void *interlocked_xchg_ptr( void **dest, void *val )
226 _lwp_mutex_lock( &interlocked_mutex );
229 _lwp_mutex_unlock( &interlocked_mutex );
233 long interlocked_xchg_add( long *dest, long incr )
236 _lwp_mutex_lock( &interlocked_mutex );
239 _lwp_mutex_unlock( &interlocked_mutex );
243 # error You must implement the interlocked* functions for your CPU