[wine] / scheduler / critsection.c
/*
 * Win32 critical sections
 *
 * Copyright 1998 Alexandre Julliard
 */

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

#include "config.h"
#include "winerror.h"
#include "winbase.h"
#include "ntddk.h"
#include "debugtools.h"
#include "thread.h"

DEFAULT_DEBUG_CHANNEL(win32);
DECLARE_DEBUG_CHANNEL(relay);

/***********************************************************************
 *           InitializeCriticalSection   (KERNEL32.@)
 */
void WINAPI InitializeCriticalSection( CRITICAL_SECTION *crit )
{
    NTSTATUS ret = RtlInitializeCriticalSection( crit );
    if (ret) RtlRaiseStatus( ret );
}
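
/* Illustrative sketch: typical use of the critical section API implemented
 * in this file.  EnterCriticalSection, LeaveCriticalSection and
 * DeleteCriticalSection are the standard Win32 entry points; the counter
 * and helper names below are made up for the example only.
 */
#if 0
static CRITICAL_SECTION example_cs;
static int example_counter;

static void example_init(void) { InitializeCriticalSection( &example_cs ); }
static void example_done(void) { DeleteCriticalSection( &example_cs ); }

static void example_increment(void)
{
    EnterCriticalSection( &example_cs );  /* acquire the lock (recursive per thread) */
    example_counter++;                    /* protected region */
    LeaveCriticalSection( &example_cs );  /* release the lock */
}
#endif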

/***********************************************************************
 *           InitializeCriticalSectionAndSpinCount   (KERNEL32.@)
 */
BOOL WINAPI InitializeCriticalSectionAndSpinCount( CRITICAL_SECTION *crit, DWORD spincount )
{
    NTSTATUS ret = RtlInitializeCriticalSectionAndSpinCount( crit, spincount );
    if (ret) RtlRaiseStatus( ret );
    return !ret;
}

/***********************************************************************
 *           SetCriticalSectionSpinCount   (KERNEL32.@)
 * This function is available on NT4 SP3 or later, but not on Win98.
 * The spin count is only relevant on SMP systems.
 */
DWORD WINAPI SetCriticalSectionSpinCount( CRITICAL_SECTION *crit, DWORD spincount )
{
    ULONG_PTR oldspincount = crit->SpinCount;
    if (spincount) FIXME("critsection=%p: spincount=%ld not supported\n", crit, spincount);
    crit->SpinCount = spincount;
    return oldspincount;
}

/***********************************************************************
 *           MakeCriticalSectionGlobal   (KERNEL32.@)
 */
void WINAPI MakeCriticalSectionGlobal( CRITICAL_SECTION *crit )
{
    /* let's assume that only one thread at a time will try to do this */
    HANDLE sem = crit->LockSemaphore;
    if (!sem) NtCreateSemaphore( &sem, SEMAPHORE_ALL_ACCESS, NULL, 0, 1 );
    crit->LockSemaphore = ConvertToGlobalHandle( sem );
}


/***********************************************************************
 *           ReinitializeCriticalSection   (KERNEL32.@)
 */
void WINAPI ReinitializeCriticalSection( CRITICAL_SECTION *crit )
{
    if ( !crit->LockSemaphore )
        RtlInitializeCriticalSection( crit );
}


/***********************************************************************
 *           UninitializeCriticalSection   (KERNEL32.@)
 */
void WINAPI UninitializeCriticalSection( CRITICAL_SECTION *crit )
{
    RtlDeleteCriticalSection( crit );
}

#ifdef __i386__

/***********************************************************************
 *              InterlockedCompareExchange (KERNEL32.@)
 */
/* LONG WINAPI InterlockedCompareExchange( PLONG dest, LONG xchg, LONG compare ); */
__ASM_GLOBAL_FUNC(InterlockedCompareExchange,
                  "movl 12(%esp),%eax\n\t"
                  "movl 8(%esp),%ecx\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; cmpxchgl %ecx,(%edx)\n\t"
                  "ret $12");
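
/* For reference, the operation above is equivalent to the following, except
 * that the compare and the store happen as one atomic step ("lock; cmpxchgl"):
 *
 *     LONG old = *dest;
 *     if (old == compare) *dest = xchg;
 *     return old;    -- the old value ends up in %eax
 */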

/***********************************************************************
 *              InterlockedExchange (KERNEL32.@)
 */
/* LONG WINAPI InterlockedExchange( PLONG dest, LONG val ); */
__ASM_GLOBAL_FUNC(InterlockedExchange,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xchgl %eax,(%edx)\n\t"
                  "ret $8");

/***********************************************************************
 *              InterlockedExchangeAdd (KERNEL32.@)
 */
/* LONG WINAPI InterlockedExchangeAdd( PLONG dest, LONG incr ); */
__ASM_GLOBAL_FUNC(InterlockedExchangeAdd,
                  "movl 8(%esp),%eax\n\t"
                  "movl 4(%esp),%edx\n\t"
                  "lock; xaddl %eax,(%edx)\n\t"
                  "ret $8");

/***********************************************************************
 *              InterlockedIncrement (KERNEL32.@)
 */
/* LONG WINAPI InterlockedIncrement( PLONG dest ); */
__ASM_GLOBAL_FUNC(InterlockedIncrement,
                  "movl 4(%esp),%edx\n\t"
                  "movl $1,%eax\n\t"
                  "lock; xaddl %eax,(%edx)\n\t"
                  "incl %eax\n\t"
                  "ret $4");

/***********************************************************************
 *              InterlockedDecrement (KERNEL32.@)
 */
__ASM_GLOBAL_FUNC(InterlockedDecrement,
                  "movl 4(%esp),%edx\n\t"
                  "movl $-1,%eax\n\t"
                  "lock; xaddl %eax,(%edx)\n\t"
                  "decl %eax\n\t"
                  "ret $4");
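
/* In the two functions above, "lock; xaddl" atomically adds %eax to *dest and
 * leaves the *previous* value of *dest in %eax; the following incl/decl then
 * adjusts it so that the *new* value is returned, as the API requires. */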

#elif defined(__sparc__) && defined(__sun__)

/*
 * As the earlier Sparc processors lack the necessary atomic instructions,
 * we simply fall back to the library-provided _lwp_mutex routines to
 * ensure mutual exclusion in a way appropriate for the current
 * architecture.
 *
 * FIXME: If we have the compare-and-swap instruction (Sparc v9 and above)
 *        we could use it to speed up the Interlocked operations ...
 */

#include <synch.h>
static lwp_mutex_t interlocked_mutex = DEFAULTMUTEX;
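
/* With this fallback the Interlocked* functions are atomic only with respect
 * to each other: loads and stores that bypass these functions are not
 * protected by interlocked_mutex. */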

/***********************************************************************
 *              InterlockedCompareExchange (KERNEL32.@)
 */
LONG WINAPI InterlockedCompareExchange( PLONG dest, LONG xchg, LONG compare )
{
    _lwp_mutex_lock( &interlocked_mutex );

    if ( *dest == compare )
        *dest = xchg;
    else
        compare = *dest;

    _lwp_mutex_unlock( &interlocked_mutex );
    return compare;
}

/***********************************************************************
 *              InterlockedExchange (KERNEL32.@)
 */
LONG WINAPI InterlockedExchange( PLONG dest, LONG val )
{
    LONG retv;
    _lwp_mutex_lock( &interlocked_mutex );

    retv = *dest;
    *dest = val;

    _lwp_mutex_unlock( &interlocked_mutex );
    return retv;
}

/***********************************************************************
 *              InterlockedExchangeAdd (KERNEL32.@)
 */
LONG WINAPI InterlockedExchangeAdd( PLONG dest, LONG incr )
{
    LONG retv;
    _lwp_mutex_lock( &interlocked_mutex );

    retv = *dest;
    *dest += incr;

    _lwp_mutex_unlock( &interlocked_mutex );
    return retv;
}

/***********************************************************************
 *              InterlockedIncrement (KERNEL32.@)
 */
LONG WINAPI InterlockedIncrement( PLONG dest )
{
    LONG retv;
    _lwp_mutex_lock( &interlocked_mutex );

    retv = ++*dest;

    _lwp_mutex_unlock( &interlocked_mutex );
    return retv;
}

/***********************************************************************
 *              InterlockedDecrement (KERNEL32.@)
 */
LONG WINAPI InterlockedDecrement( PLONG dest )
{
    LONG retv;
    _lwp_mutex_lock( &interlocked_mutex );

    retv = --*dest;

    _lwp_mutex_unlock( &interlocked_mutex );
    return retv;
}

#else
#error You must implement the Interlocked* functions for your CPU
#endif
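
/* Illustrative sketch: using the interlocked operations above as a simple
 * thread-safe reference count.  The structure and helper names are made up
 * for the example only. */
#if 0
typedef struct { LONG refcount; } example_object;

static void example_addref( example_object *obj )
{
    InterlockedIncrement( &obj->refcount );       /* atomically ++refcount */
}

static void example_release( example_object *obj )
{
    if (!InterlockedDecrement( &obj->refcount ))  /* returns the new value */
        HeapFree( GetProcessHeap(), 0, obj );     /* last reference gone */
}
#endif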