/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2005
 *
 * Macros for THREADED_RTS support
 *
 * -------------------------------------------------------------------------- */

#ifndef SMP_H
#define SMP_H

/* Restrictions on THREADED_RTS:
 *
 *      TICKY_TICKY is not supported at all.
 *      PROFILING is supported, but with only 1 CPU.
 *      Unregisterised builds are OK, but again with only 1 CPU.
 */

#if defined(THREADED_RTS)

#if defined(TICKY_TICKY)
#error TICKY_TICKY is not compatible with THREADED_RTS.
#endif

/*
 * XCHG - the atomic exchange operation.  Used for locking closures
 * during updates (see lockClosure() below) and by the MVar primops.
 *
 * NB: on x86, the xchg instruction with a memory operand is implicitly
 * locked, so we do not need an explicit lock prefix there.
 */
INLINE_HEADER StgWord
xchg(StgPtr p, StgWord w)
{
    StgWord result;
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    result = w;
    __asm__ __volatile__ (
        "xchg %1,%0"
        :"+r" (result), "+m" (*p)
        : /* no input-only operands */
    );
#elif powerpc_HOST_ARCH
    /* load-reserve / store-conditional loop: retry if another CPU
       wrote to *p between the lwarx and the stwcx. */
    __asm__ __volatile__ (
        "1:     lwarx     %0, 0, %2\n"
        "       stwcx.    %1, 0, %2\n"
        "       bne-      1b"
        :"=&r" (result)      /* early-clobber: %0 is written before the
                                inputs %1 and %2 are dead */
        :"r" (w), "r" (p)
        :"cc", "memory"
    );
#elif sparc_HOST_ARCH
    result = w;
    __asm__ __volatile__ (
        "swap %1,%0"
        : "+r" (result), "+m" (*p)
        : /* no input-only operands */
    );
#elif !defined(WITHSMP)
    result = *p;
    *p = w;
#else
#error xchg() unimplemented on this architecture
#endif
    return result;
}

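/*
 * Usage sketch (illustrative only, not part of the RTS API): xchg() is
 * enough to build a simple test-and-set spinlock.  The names lock_word,
 * spin_lock and spin_unlock below are hypothetical.
 */
#if 0
static StgWord lock_word = 0;           /* 0 = free, 1 = held */

static void spin_lock(void)
{
    /* Atomically write 1; if we got 0 back, the lock was free and is
       now ours, otherwise keep spinning. */
    while (xchg(&lock_word, 1) != 0) { }
}

static void spin_unlock(void)
{
    /* Releasing via xchg keeps the store atomic and ordered. */
    (void)xchg(&lock_word, 0);
}
#endif
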
/*
 * CMPXCHG - single-word atomic compare-and-swap: if *p == o then set
 * *p to n; in either case return the old value of *p.  Used in the
 * STM implementation.
 */
INLINE_HEADER StgWord
cas(StgVolatilePtr p, StgWord o, StgWord n)
{
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    /* cmpxchg compares %eax/%rax (operand 0) with *p and stores n into
       *p on a match; either way the old value of *p ends up in o. */
    __asm__ __volatile__ (
        "lock; cmpxchg %3,%1"
        :"=a" (o), "=m" (*p)
        :"0" (o), "r" (n)
        :"cc", "memory");
    return o;
#elif powerpc_HOST_ARCH
    StgWord result;
    __asm__ __volatile__ (
        "1:     lwarx     %0, 0, %3\n"
        "       cmpw      %0, %1\n"
        "       bne       2f\n"
        "       stwcx.    %2, 0, %3\n"
        "       bne-      1b\n"
        "2:"
        :"=&r" (result)
        :"r" (o), "r" (n), "r" (p)
        :"cc", "memory"
    );
    return result;
#elif sparc_HOST_ARCH
    __asm__ __volatile__ (
        "cas [%1], %2, %0"
        : "+r" (n)
        : "r" (p), "r" (o)
        : "memory"
    );
    return n;
#elif !defined(WITHSMP)
    StgWord result;
    result = *p;
    if (result == o) {
        *p = n;
    }
    return result;
#else
#error cas() unimplemented on this architecture
#endif
}

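/*
 * Usage sketch (illustrative only): cas() supports the usual lock-free
 * read-modify-write loop.  atomic_inc below is a hypothetical helper;
 * the loop retries whenever another CPU changed *p between the read
 * and the cas().
 */
#if 0
static StgWord atomic_inc(StgVolatilePtr p)
{
    StgWord old;
    do {
        old = *p;
    } while (cas(p, old, old + 1) != old);
    return old + 1;
}
#endif
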
/*
 * Write barrier - ensure that all preceding writes have completed
 * before any following writes take effect.
 *
 * We need to tell both the compiler AND the CPU about the barrier.
 * This is a brute-force solution; better results might be obtained by
 * using volatile type declarations to get fine-grained ordering
 * control in C, plus a memory barrier instruction only on those CPUs
 * that require one (not x86 or x86_64).
 */
INLINE_HEADER void
write_barrier(void) {
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    /* x86 does not reorder stores with other stores, so a compiler
       barrier is sufficient here. */
    __asm__ __volatile__ ("" : : : "memory");
#elif powerpc_HOST_ARCH
    /* lwsync orders store/store (and load/load, load/store). */
    __asm__ __volatile__ ("lwsync" : : : "memory");
#elif sparc_HOST_ARCH
    /* Sparc in TSO mode does not require write/write barriers. */
    __asm__ __volatile__ ("" : : : "memory");
#elif !defined(WITHSMP)
    return;
#else
#error memory barriers unimplemented on this architecture
#endif
}

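/*
 * Usage sketch (illustrative only): the typical use of write_barrier()
 * is the publish pattern - initialise an object fully, then make it
 * reachable by other CPUs.  'shared' and 'publish' are hypothetical.
 */
#if 0
static StgClosure * volatile shared = NULL;

static void publish(StgClosure *obj)
{
    /* ... initialise obj's fields ... */
    write_barrier();    /* field writes become visible before the pointer */
    shared = obj;       /* other CPUs now see a fully-initialised obj */
}
#endif
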
/*
 * Locking/unlocking closures
 *
 * This is used primarily in the implementation of MVars.  A closure is
 * locked by atomically swapping stg_WHITEHOLE_info into its info
 * pointer; whoever gets the real info pointer back owns the closure.
 */
#define SPIN_COUNT 4000

INLINE_HEADER StgInfoTable *
lockClosure(StgClosure *p)
{
    StgWord info;
    do {
        nat i = 0;
        do {
            info = xchg((P_)(void *)&p->header.info, (W_)&stg_WHITEHOLE_info);
            /* Anything other than WHITEHOLE means the closure was
               unlocked, and the lock is now ours. */
            if (info != (W_)&stg_WHITEHOLE_info) return (StgInfoTable *)info;
        } while (++i < SPIN_COUNT);
        /* Still contended after SPIN_COUNT tries: give up the CPU. */
        yieldThread();
    } while (1);
}

INLINE_HEADER void
unlockClosure(StgClosure *p, StgInfoTable *info)
{
    // The unlocking store must not be reordered before any writes to
    // the closure's payload, so we need a write barrier first:
    write_barrier();
    p->header.info = info;
}

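/*
 * Usage sketch (illustrative only): the lock/modify/unlock protocol.
 * While locked, the closure's info pointer is stg_WHITEHOLE_info, so
 * any other CPU calling lockClosure() on it spins until the real info
 * pointer is restored.  modify_closure is a hypothetical caller.
 */
#if 0
static void modify_closure(StgClosure *p)
{
    StgInfoTable *info;

    info = lockClosure(p);      /* spins until we own p */
    /* ... read/modify p's payload ... */
    unlockClosure(p, info);     /* write barrier, then restore info */
}
#endif
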
#else /* !THREADED_RTS */

#define write_barrier() /* nothing */

INLINE_HEADER StgWord
xchg(StgPtr p, StgWord w)
{
    StgWord old = *p;
    *p = w;
    return old;
}

INLINE_HEADER StgInfoTable *
lockClosure(StgClosure *p)
{ return (StgInfoTable *)p->header.info; }

INLINE_HEADER void
unlockClosure(StgClosure *p STG_UNUSED, StgInfoTable *info STG_UNUSED)
{ /* nothing */ }

#endif /* !THREADED_RTS */

// Handy specialised versions of lockClosure()/unlockClosure()
INLINE_HEADER void lockTSO(StgTSO *tso)
{ lockClosure((StgClosure *)tso); }

INLINE_HEADER void unlockTSO(StgTSO *tso)
{ unlockClosure((StgClosure*)tso, (StgInfoTable*)&stg_TSO_info); }

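/*
 * Usage sketch (illustrative only): unlockTSO() can pass stg_TSO_info
 * unconditionally because an unlocked TSO's info pointer is always
 * stg_TSO_info, so the value returned by lockTSO() need not be kept.
 * wake_tso is a hypothetical caller.
 */
#if 0
static void wake_tso(StgTSO *tso)
{
    lockTSO(tso);
    /* ... update the TSO's state under the lock ... */
    unlockTSO(tso);
}
#endif
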
#endif /* SMP_H */