libs/port/interlocked.c
/*
 * interlocked functions
 *
 * Copyright 1996 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "config.h"
#include "wine/port.h"

#ifdef __i386__

#ifdef __GNUC__

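/* Note on the i386 assembly below: with the cdecl calling convention the
 * arguments sit at 4(%esp), 8(%esp) and 12(%esp) on entry and the result is
 * returned in %eax.  "lock; cmpxchgl" compares %eax with the destination and
 * either stores %ecx or reloads %eax from memory, so %eax always ends up
 * holding the previous contents of *dest, which is exactly the required
 * return value; "lock; xaddl" likewise leaves the old value in %eax. */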
__ASM_GLOBAL_FUNC(interlocked_cmpxchg,
                  "movl 12(%esp),%eax\n\t"
                  "movl 8(%esp),%ecx\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; cmpxchgl %ecx,(%edx)\n\t"
                  "ret");
__ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
                  "movl 12(%esp),%eax\n\t"
                  "movl 8(%esp),%ecx\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; cmpxchgl %ecx,(%edx)\n\t"
                  "ret");
__ASM_GLOBAL_FUNC(interlocked_xchg,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xchgl %eax,(%edx)\n\t"
                  "ret");
__ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xchgl %eax,(%edx)\n\t"
                  "ret");
__ASM_GLOBAL_FUNC(interlocked_xchg_add,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xaddl %eax,(%edx)\n\t"
                  "ret");

#elif defined(_MSC_VER)

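/* The MSVC versions use __declspec(naked) so the compiler emits no prologue
 * or epilogue; esp therefore still points at the return address and the
 * 4/8/12[esp] argument offsets match the GNU assembler versions above. */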
__declspec(naked) long interlocked_cmpxchg( long *dest, long xchg, long compare )
{
    __asm mov eax, 12[esp];
    __asm mov ecx, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock cmpxchg [edx], ecx;
    __asm ret;
}

__declspec(naked) void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
{
    __asm mov eax, 12[esp];
    __asm mov ecx, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock cmpxchg [edx], ecx;
    __asm ret;
}

__declspec(naked) long interlocked_xchg( long *dest, long val )
{
    __asm mov eax, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock xchg [edx], eax;
    __asm ret;
}

__declspec(naked) void *interlocked_xchg_ptr( void **dest, void *val )
{
    __asm mov eax, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock xchg [edx], eax;
    __asm ret;
}

__declspec(naked) long interlocked_xchg_add( long *dest, long incr )
{
    __asm mov eax, 8[esp];
    __asm mov edx, 4[esp];
    __asm lock xadd [edx], eax;
    __asm ret;
}

#else
# error You must implement the interlocked* functions for your compiler
#endif

#elif defined(__powerpc__)
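/* The PowerPC versions are built around the lwarx/stwcx. pair: lwarx loads a
 * word and establishes a reservation, stwcx. stores only if the reservation
 * is still intact (recording success in CR0), and "bne- 0b" retries the loop
 * otherwise.  The trailing isync keeps later loads from being satisfied
 * before the atomic update has taken effect. */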
void* interlocked_cmpxchg_ptr( void **dest, void* xchg, void* compare)
{
    long ret = 0;
    long scratch;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%2\n"
        "      xor. %1,%4,%0\n"
        "      bne 1f\n"
        "      stwcx. %3,0,%2\n"
        "      bne- 0b\n"
        "      isync\n"
        "1:    "
        : "=&r"(ret), "=&r"(scratch)
        : "r"(dest), "r"(xchg), "r"(compare)
        : "cr0","memory");
    return (void*)ret;
}

long interlocked_cmpxchg( long *dest, long xchg, long compare)
{
    long ret = 0;
    long scratch;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%2\n"
        "      xor. %1,%4,%0\n"
        "      bne 1f\n"
        "      stwcx. %3,0,%2\n"
        "      bne- 0b\n"
        "      isync\n"
        "1:    "
        : "=&r"(ret), "=&r"(scratch)
        : "r"(dest), "r"(xchg), "r"(compare)
        : "cr0","memory","r0");
    return ret;
}

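/* After the loop below, ret holds the new value (*dest + incr), so the old
 * value is recovered by subtracting incr before returning. */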
long interlocked_xchg_add( long *dest, long incr )
{
    long ret = 0;
    long zero = 0;
    __asm__ __volatile__(
        "0:    lwarx %0, %3, %1\n"
        "      add %0, %2, %0\n"
        "      stwcx. %0, %3, %1\n"
        "      bne- 0b\n"
        "      isync\n"
        : "=&r" (ret)
        : "r"(dest), "r"(incr), "r"(zero)
        : "cr0", "memory", "r0"
    );
    return ret-incr;
}

long interlocked_xchg( long* dest, long val )
{
    long ret = 0;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%1\n"
        "      stwcx. %2,0,%1\n"
        "      bne- 0b\n"
        "      isync\n"
        : "=&r"(ret)
        : "r"(dest), "r"(val)
        : "cr0","memory","r0");
    return ret;
}

void* interlocked_xchg_ptr( void** dest, void* val )
{
    void *ret = NULL;
    __asm__ __volatile__(
        "0:    lwarx %0,0,%1\n"
        "      stwcx. %2,0,%1\n"
        "      bne- 0b\n"
        "      isync\n"
        : "=&r"(ret)
        : "r"(dest), "r"(val)
        : "cr0","memory","r0");
    return ret;
}

#elif defined(__sparc__) && defined(__sun__)

/*
 * As earlier Sparc processors lack the necessary atomic instructions,
 * I'm simply falling back to the library-provided _lwp_mutex routines
 * to ensure mutual exclusion in a way appropriate for the current
 * architecture.
 *
 * FIXME:  If we have the compare-and-swap instruction (Sparc v9 and above)
 *         we could use this to speed up the Interlocked operations ...
 */
#include <synch.h>
static lwp_mutex_t interlocked_mutex = DEFAULTMUTEX;

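/* Note: the mutex lives in this process's data segment, so this fallback
 * only serializes threads within the current process. */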
long interlocked_cmpxchg( long *dest, long xchg, long compare )
{
    _lwp_mutex_lock( &interlocked_mutex );
    if (*dest == compare) *dest = xchg;
    else compare = *dest;
    _lwp_mutex_unlock( &interlocked_mutex );
    return compare;
}

void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
{
    _lwp_mutex_lock( &interlocked_mutex );
    if (*dest == compare) *dest = xchg;
    else compare = *dest;
    _lwp_mutex_unlock( &interlocked_mutex );
    return compare;
}

long interlocked_xchg( long *dest, long val )
{
    long retv;
    _lwp_mutex_lock( &interlocked_mutex );
    retv = *dest;
    *dest = val;
    _lwp_mutex_unlock( &interlocked_mutex );
    return retv;
}

void *interlocked_xchg_ptr( void **dest, void *val )
{
    void *retv;
    _lwp_mutex_lock( &interlocked_mutex );
    retv = *dest;
    *dest = val;
    _lwp_mutex_unlock( &interlocked_mutex );
    return retv;
}

long interlocked_xchg_add( long *dest, long incr )
{
    long retv;
    _lwp_mutex_lock( &interlocked_mutex );
    retv = *dest;
    *dest += incr;
    _lwp_mutex_unlock( &interlocked_mutex );
    return retv;
}

#elif defined(__ALPHA__) && defined(__GNUC__)

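/* Alpha calling convention: the first integer arguments arrive in $16-$18
 * and the result is returned in $0.  ldl_l/stl_c (and the quadword
 * ldq_l/stq_c forms for the pointer variants) implement a
 * load-locked/store-conditional loop: st*_c writes its success flag back
 * into the source register, so "beq ...,L0..." retries on failure, and the
 * final mb is a memory barrier. */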
__ASM_GLOBAL_FUNC(interlocked_cmpxchg,
                  "L0cmpxchg:\n\t"
                  "ldl_l $0,0($16)\n\t"
                  "cmpeq $0,$18,$1\n\t"
                  "beq   $1,L1cmpxchg\n\t"
                  "mov   $17,$0\n\t"
                  "stl_c $0,0($16)\n\t"
                  "beq   $0,L0cmpxchg\n\t"
                  "mov   $18,$0\n"
                  "L1cmpxchg:\n\t"
                  "mb");

__ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
                  "L0cmpxchg_ptr:\n\t"
                  "ldq_l $0,0($16)\n\t"
                  "cmpeq $0,$18,$1\n\t"
                  "beq   $1,L1cmpxchg_ptr\n\t"
                  "mov   $17,$0\n\t"
                  "stq_c $0,0($16)\n\t"
                  "beq   $0,L0cmpxchg_ptr\n\t"
                  "mov   $18,$0\n"
                  "L1cmpxchg_ptr:\n\t"
                  "mb");

__ASM_GLOBAL_FUNC(interlocked_xchg,
                  "L0xchg:\n\t"
                  "ldl_l $0,0($16)\n\t"
                  "mov   $17,$1\n\t"
                  "stl_c $1,0($16)\n\t"
                  "beq   $1,L0xchg\n\t"
                  "mb");

__ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
                  "L0xchg_ptr:\n\t"
                  "ldq_l $0,0($16)\n\t"
                  "mov   $17,$1\n\t"
                  "stq_c $1,0($16)\n\t"
                  "beq   $1,L0xchg_ptr\n\t"
                  "mb");

__ASM_GLOBAL_FUNC(interlocked_xchg_add,
                  "L0xchg_add:\n\t"
                  "ldl_l $0,0($16)\n\t"
                  "addl  $0,$17,$1\n\t"
                  "stl_c $1,0($16)\n\t"
                  "beq   $1,L0xchg_add\n\t"
                  "mb");

#else
# error You must implement the interlocked* functions for your CPU
#endif