includes/SMP.h
/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2005
 *
 * Macros for THREADED_RTS support
 *
 * -------------------------------------------------------------------------- */

#ifndef SMP_H
#define SMP_H

/* THREADED_RTS currently has the following restrictions:
 *
 *      PROFILING is supported, but only with 1 CPU
 *      TICKY_TICKY is not supported
 *      Unregisterised builds are ok, but only 1 CPU is supported.
 */

#if defined(THREADED_RTS)

#if defined(TICKY_TICKY)
#error Build options incompatible with THREADED_RTS.
#endif

/*
 * XCHG - the atomic exchange instruction.  Used for locking closures
 * during updates (see lockClosure() below) and the MVar primops.
 *
 * NB: the xchg instruction is implicitly locked, so we do not need
 * a lock prefix here.
 */
INLINE_HEADER StgWord
xchg(StgPtr p, StgWord w)
{
    StgWord result;
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    result = w;
    __asm__ __volatile__ (
        // xchg with a memory operand is implicitly locked; the "memory"
        // clobber stops the compiler moving other accesses across it
        "xchg %1,%0"
        :"+r" (result), "+m" (*p)
        : /* no input-only operands */
        : "memory"
    );
#elif powerpc_HOST_ARCH
    __asm__ __volatile__ (
        // load-reserved / store-conditional loop: retry until the
        // store-conditional succeeds
        "1:     lwarx     %0, 0, %2\n"
        "       stwcx.    %1, 0, %2\n"
        "       bne-      1b"
        :"=&r" (result)          // early-clobber: written before all inputs are consumed
        :"r" (w), "r" (p)
        : "cc", "memory"
    );
#else
#error xchg() unimplemented on this architecture
#endif
    return result;
}
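
/*
 * Usage sketch (illustrative only, not part of this header): xchg() returns
 * the previous contents of *p, which is what makes it usable as a lock
 * primitive -- see lockClosure() below for the real use.  A hypothetical
 * spin-lock over a plain word would look like:
 *
 *     StgVolatilePtr lock;                     // hypothetical lock word, 0 = free
 *     while (xchg((StgPtr)lock, 1) == 1) { }   // spin until we swap in 1 and saw 0
 *     ... critical section ...
 *     wb();
 *     *lock = 0;                               // release
 */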

/*
 * CMPXCHG - the single-word atomic compare-and-exchange instruction.  Used
 * in the STM implementation.
 */
INLINE_HEADER StgWord
cas(StgVolatilePtr p, StgWord o, StgWord n)
{
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    __asm__ __volatile__ (
        // if *p == o then *p := n; either way the old *p ends up in %0 (eax/rax)
        "lock\ncmpxchg %3,%1"
        :"=a"(o), "+m" (*p)
        :"0" (o), "r" (n));
    return o;
#elif powerpc_HOST_ARCH
    StgWord result;
    __asm__ __volatile__ (
        "1:     lwarx     %0, 0, %3\n"    // load-reserved
        "       cmpw      %0, %1\n"       // compare against the expected value
        "       bne       2f\n"           // mismatch: return the current value
        "       stwcx.    %2, 0, %3\n"    // store-conditional the new value
        "       bne-      1b\n"           // reservation lost: retry
        "2:"
        :"=&r" (result)                   // early-clobber: written before inputs are consumed
        :"r" (o), "r" (n), "r" (p)
        : "cc", "memory"
    );
    return result;
#else
#error cas() unimplemented on this architecture
#endif
}
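
/*
 * Usage sketch (illustrative only, not part of this header): cas() returns
 * the value that was in *p before the operation, so a swap succeeded exactly
 * when the return value equals the expected old value.  A hypothetical
 * lock-free increment of a shared counter:
 *
 *     StgVolatilePtr counter;              // hypothetical shared word
 *     StgWord old;
 *     do {
 *         old = *counter;
 *     } while (cas(counter, old, old + 1) != old);
 */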

/*
 * Write barrier - ensure that all preceding writes have happened
 * before all following writes.
 *
 * We need to tell both the compiler AND the CPU about the barrier.
 * This is a brute force solution; better results might be obtained by
 * using volatile type declarations to get fine-grained ordering
 * control in C, and optionally a memory barrier instruction on CPUs
 * that require it (not x86 or x86_64).
 */
INLINE_HEADER void
wb(void) {
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    // x86 and x86_64 do not reorder stores, so a compiler barrier is enough
    __asm__ __volatile__ ("" : : : "memory");
#elif powerpc_HOST_ARCH
    // lwsync orders prior stores before subsequent stores
    __asm__ __volatile__ ("lwsync" : : : "memory");
#else
#error memory barriers unimplemented on this architecture
#endif
}
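
/*
 * Usage sketch (illustrative only): the classic publish pattern.  All writes
 * that initialise an object must be visible before the write that makes the
 * object reachable; unlockClosure() below relies on exactly this:
 *
 *     obj->field1 = ...;      // hypothetical object initialisation
 *     obj->field2 = ...;
 *     wb();                   // order the writes above before...
 *     shared_ptr = obj;       // ...the write that publishes the object
 */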

/*
 * Locking/unlocking closures
 *
 * This is used primarily in the implementation of MVars.
 */
#define SPIN_COUNT 4000

INLINE_HEADER StgInfoTable *
lockClosure(StgClosure *p)
{
#if i386_HOST_ARCH || x86_64_HOST_ARCH || powerpc_HOST_ARCH
    StgWord info;
    do {
        nat i = 0;
        do {
            // atomically swap in the WHITEHOLE info pointer; if the old value
            // was not WHITEHOLE, we now own the closure and return its info
            info = xchg((P_)(void *)&p->header.info, (W_)&stg_WHITEHOLE_info);
            if (info != (W_)&stg_WHITEHOLE_info) return (StgInfoTable *)info;
        } while (++i < SPIN_COUNT);
        // the lock is contended: give up the CPU before spinning again
        yieldThread();
    } while (1);
#else
    // fall back to the storage-manager lock on other architectures
    ACQUIRE_SM_LOCK
    return (StgInfoTable *)p->header.info;
#endif
}

INLINE_HEADER void
unlockClosure(StgClosure *p, StgInfoTable *info)
{
#if i386_HOST_ARCH || x86_64_HOST_ARCH || powerpc_HOST_ARCH
    // This is a strictly ordered write, so we need a wb():
    wb();
    p->header.info = info;
#else
    RELEASE_SM_LOCK;
#endif
}
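
/*
 * Usage sketch (illustrative only): lockClosure()/unlockClosure() bracket a
 * mutation of a closure such as an MVar.  Writing the saved info pointer
 * back both publishes the updated fields (via the wb() above) and releases
 * the lock:
 *
 *     StgInfoTable *info = lockClosure(mvar);   // mvar: hypothetical StgClosure *
 *     ... inspect / update the MVar's payload ...
 *     unlockClosure(mvar, info);
 */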

#else /* !THREADED_RTS */

#define wb() /* nothing */

// In the non-threaded RTS there is no other CPU to race with, so a plain,
// non-atomic exchange is sufficient.
INLINE_HEADER StgWord
xchg(StgPtr p, StgWord w)
{
    StgWord old = *p;
    *p = w;
    return old;
}

#endif /* !THREADED_RTS */

#endif /* SMP_H */