/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2005
 *
 * Macros for THREADED_RTS support
 *
 * -------------------------------------------------------------------------- */

#ifndef SMP_H
#define SMP_H

/* THREADED_RTS is currently not compatible with the following options:
 *
 *      PROFILING (but only 1 CPU supported)
 *      TICKY_TICKY
 *      Unregisterised builds are ok, but only 1 CPU supported.
 */

#if defined(THREADED_RTS)

#if defined(TICKY_TICKY)
#error Build options incompatible with THREADED_RTS.
#endif

/* ----------------------------------------------------------------------------
   Atomic operations
   ------------------------------------------------------------------------- */

/*
 * The atomic exchange operation: xchg(p,w) exchanges the value
 * pointed to by p with the value w, returning the old value.
 *
 * Used for locking closures during updates (see lockClosure() below)
 * and the MVar primops.
 */
INLINE_HEADER StgWord xchg(StgPtr p, StgWord w);
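
/*
 * Illustrative sketch (not part of this header): using xchg() to
 * atomically take ownership of a shared word.  'shared_slot' is a
 * hypothetical variable, not an RTS name.
 *
 *     StgWord shared_slot;                    // 0 means "empty"
 *     ...
 *     StgWord old = xchg(&shared_slot, 0);    // swap in "empty"
 *     if (old != 0) {
 *         // 'old' now belongs exclusively to this thread: the swap
 *         // returned it and left 0 behind in a single atomic step
 *     }
 */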

/*
 * Compare-and-swap.  Atomically does this:
 *
 * cas(p,o,n) {
 *    r = *p;
 *    if (r == o) { *p = n };
 *    return r;
 * }
 */
INLINE_HEADER StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
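
/*
 * Illustrative sketch (not part of this header): using cas() to claim a
 * one-shot flag.  'claimed' is a hypothetical variable, not an RTS name.
 *
 *     volatile StgWord claimed = 0;
 *     ...
 *     if (cas(&claimed, 0, 1) == 0) {
 *         // the old value was 0, so our write of 1 succeeded; exactly
 *         // one thread ever reaches this branch
 *     }
 */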

/*
 * Prevents write operations from moving across this call in either
 * direction.
 */
INLINE_HEADER void write_barrier(void);

/* ----------------------------------------------------------------------------
   Implementations
   ------------------------------------------------------------------------- */
/*
 * NB: the x86 xchg instruction is implicitly locked, so we do not need
 * a lock prefix here.
 */
INLINE_HEADER StgWord
xchg(StgPtr p, StgWord w)
{
    StgWord result;
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    result = w;
    __asm__ __volatile__ (
        "xchg %1,%0"
        :"+r" (result), "+m" (*p)
        : /* no input-only operands */
    );
#elif powerpc_HOST_ARCH
    __asm__ __volatile__ (
        "1:     lwarx     %0, 0, %2\n"
        "       stwcx.    %1, 0, %2\n"
        "       bne-      1b"
        :"=&r" (result)
        :"r" (w), "r" (p)
    );
#elif sparc_HOST_ARCH
    result = w;
    __asm__ __volatile__ (
        "swap %1,%0"
        : "+r" (result), "+m" (*p)
        : /* no input-only operands */
    );
#elif !defined(WITHSMP)
    result = *p;
    *p = w;
#else
#error xchg() unimplemented on this architecture
#endif
    return result;
}

/*
 * CMPXCHG - the single-word atomic compare-and-exchange instruction.  Used
 * in the STM implementation.
 */
INLINE_HEADER StgWord
cas(StgVolatilePtr p, StgWord o, StgWord n)
{
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    __asm__ __volatile__ (
        "lock\ncmpxchg %3,%1"
        :"=a"(o), "=m" (*(volatile unsigned int *)p)
        :"0" (o), "r" (n));
    return o;
#elif powerpc_HOST_ARCH
    StgWord result;
    __asm__ __volatile__ (
        "1:     lwarx     %0, 0, %3\n"
        "       cmpw      %0, %1\n"
        "       bne       2f\n"
        "       stwcx.    %2, 0, %3\n"
        "       bne-      1b\n"
        "2:"
        :"=&r" (result)
        :"r" (o), "r" (n), "r" (p)
        :"cc", "memory"
    );
    return result;
#elif sparc_HOST_ARCH
    __asm__ __volatile__ (
        "cas [%1], %2, %0"
        : "+r" (n)
        : "r" (p), "r" (o)
        : "memory"
    );
    return n;
#elif !defined(WITHSMP)
    StgWord result;
    result = *p;
    if (result == o) {
        *p = n;
    }
    return result;
#else
#error cas() unimplemented on this architecture
#endif
}

/*
 * Write barrier - ensure that all preceding writes have happened
 * before all following writes.
 *
 * We need to tell both the compiler AND the CPU about the barrier.
 * This is a brute force solution; better results might be obtained by
 * using volatile type declarations to get fine-grained ordering
 * control in C, and optionally a memory barrier instruction on CPUs
 * that require it (not x86 or x86_64).
 */
INLINE_HEADER void
write_barrier(void) {
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    __asm__ __volatile__ ("" : : : "memory");
#elif powerpc_HOST_ARCH
    __asm__ __volatile__ ("lwsync" : : : "memory");
#elif sparc_HOST_ARCH
    /* Sparc in TSO mode does not require write/write barriers. */
    __asm__ __volatile__ ("" : : : "memory");
#elif !defined(WITHSMP)
    return;
#else
#error memory barriers unimplemented on this architecture
#endif
}
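
/*
 * Illustrative sketch (not part of this header): the publish pattern
 * that write_barrier() exists to support.  'payload' and 'ready' are
 * hypothetical variables, not RTS names.
 *
 *     payload = computed_value;   // 1. write the data
 *     write_barrier();            // 2. make the data visible...
 *     ready = 1;                  // 3. ...before the flag announcing it
 *
 * A reader that observes ready == 1 is then guaranteed (given a
 * corresponding read ordering on its side) to see the payload write.
 */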

/* -----------------------------------------------------------------------------
 * Locking/unlocking closures
 *
 * This is used primarily in the implementation of MVars.
 * -------------------------------------------------------------------------- */

#define SPIN_COUNT 4000

INLINE_HEADER StgInfoTable *
lockClosure(StgClosure *p)
{
    StgWord info;
    do {
        nat i = 0;
        do {
            info = xchg((P_)(void *)&p->header.info, (W_)&stg_WHITEHOLE_info);
            if (info != (W_)&stg_WHITEHOLE_info) return (StgInfoTable *)info;
        } while (++i < SPIN_COUNT);
        yieldThread();
    } while (1);
}

INLINE_HEADER void
unlockClosure(StgClosure *p, StgInfoTable *info)
{
    // This is a strictly ordered write, so we need a write_barrier():
    write_barrier();
    p->header.info = info;
}
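
/*
 * Illustrative sketch of the intended locking protocol ('mvar' stands
 * for any closure the caller wants exclusive access to):
 *
 *     StgInfoTable *info = lockClosure((StgClosure *)mvar);
 *     // the closure's info pointer is now stg_WHITEHOLE_info, so no
 *     // other thread can lock it; mutate the closure's fields here
 *     unlockClosure((StgClosure *)mvar, info);  // restore the info pointer
 */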

/* -----------------------------------------------------------------------------
 * Spin locks
 *
 * These are simple spin-only locks as opposed to Mutexes which
 * probably spin for a while before blocking in the kernel.  We use
 * these when we are sure that all our threads are actively running on
 * a CPU, eg. in the GC.
 *
 * TODO: measure whether we really need these, or whether Mutexes
 * would do (and be a bit safer if a CPU becomes loaded).
 * -------------------------------------------------------------------------- */

#if defined(DEBUG)
typedef struct StgSync_
{
    StgWord32 lock;
    StgWord64 spin;  // DEBUG version counts how much it spins
} StgSync;
#else
typedef StgWord StgSync;
#endif

typedef lnat StgSyncCount;


#if defined(DEBUG)
// Debug versions of spin locks maintain a spin count

// How to use:
// To use the debug version of the spin locks, run a debug build of the
// program under a debugger with a breakpoint on stat_exit.  At program
// exit one can examine the spin counts of the various spin locks to
// check for contention, as in the sketch below.
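//
// For example (an illustrative gdb session; 'some_sync' stands for
// whatever StgSync variable you are interested in):
//
//     (gdb) break stat_exit
//     (gdb) run
//     ...
//     (gdb) print some_sync.spin
//
// A large spin count indicates heavy contention on that lock.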

// acquire spin lock
INLINE_HEADER void ACQUIRE_SPIN_LOCK(StgSync * p)
{
    StgWord32 r = 0;
    do {
        p->spin++;
        r = cas((StgVolatilePtr)&(p->lock), 1, 0);
    } while(r == 0);
    p->spin--;
}

// release spin lock
INLINE_HEADER void RELEASE_SPIN_LOCK(StgSync * p)
{
    write_barrier();
    p->lock = 1;
}

// initialise spin lock
INLINE_HEADER void initSpinLock(StgSync * p)
{
    write_barrier();
    p->lock = 1;
    p->spin = 0;
}

#else

// acquire spin lock
INLINE_HEADER void ACQUIRE_SPIN_LOCK(StgSync * p)
{
    StgWord32 r = 0;
    do {
        r = cas((StgVolatilePtr)p, 1, 0);
    } while(r == 0);
}

// release spin lock
INLINE_HEADER void RELEASE_SPIN_LOCK(StgSync * p)
{
    write_barrier();
    (*p) = 1;
}

// init spin lock
INLINE_HEADER void initSpinLock(StgSync * p)
{
    write_barrier();
    (*p) = 1;
}

#endif /* DEBUG */
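
/*
 * Illustrative usage sketch ('my_sync' is a hypothetical variable, not
 * an RTS name).  The lock word holds 1 when free and 0 when held:
 *
 *     StgSync my_sync;
 *     initSpinLock(&my_sync);
 *     ...
 *     ACQUIRE_SPIN_LOCK(&my_sync);
 *     // critical section: keep it short, since waiters burn CPU
 *     RELEASE_SPIN_LOCK(&my_sync);
 */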

/* ---------------------------------------------------------------------- */
#else /* !THREADED_RTS */

#define write_barrier() /* nothing */

INLINE_HEADER StgWord
xchg(StgPtr p, StgWord w)
{
    StgWord old = *p;
    *p = w;
    return old;
}

INLINE_HEADER StgInfoTable *
lockClosure(StgClosure *p)
{ return (StgInfoTable *)p->header.info; }

INLINE_HEADER void
unlockClosure(StgClosure *p STG_UNUSED, StgInfoTable *info STG_UNUSED)
{ /* nothing */ }

// Using macros here means we don't have to ensure the argument is in scope
#define ACQUIRE_SPIN_LOCK(p) /* nothing */
#define RELEASE_SPIN_LOCK(p) /* nothing */

INLINE_HEADER void initSpinLock(void * p STG_UNUSED)
{ /* nothing */ }

#endif /* !THREADED_RTS */

// Handy specialised versions of lockClosure()/unlockClosure()
INLINE_HEADER void lockTSO(StgTSO *tso)
{ lockClosure((StgClosure *)tso); }

INLINE_HEADER void unlockTSO(StgTSO *tso)
{ unlockClosure((StgClosure*)tso, (StgInfoTable*)&stg_TSO_info); }

#endif /* SMP_H */