/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2005-2008
 *
 * Macros for multi-CPU support
 *
 * -------------------------------------------------------------------------- */

#ifndef SMP_H
#define SMP_H

/* THREADED_RTS is currently not compatible with the following options:
 *
 *      PROFILING (only 1 CPU supported)
 *      TICKY_TICKY
 *
 * Unregisterised builds are OK, but only 1 CPU is supported.
 */

#ifdef CMINUSMINUS

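/* Restore a locked closure's info pointer, making the closure
 * accessible again.  The preceding write barrier ensures that writes
 * to the closure's payload are committed before the new info pointer
 * becomes visible to other CPUs. */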
#define unlockClosure(ptr,info)             \
    prim %write_barrier() [];               \
    StgHeader_info(ptr) = info;

#else

#if defined(THREADED_RTS)

#if defined(TICKY_TICKY)
#error Build options incompatible with THREADED_RTS.
#endif

/* ----------------------------------------------------------------------------
   Atomic operations
   ------------------------------------------------------------------------- */

/*
 * The atomic exchange operation: xchg(p,w) exchanges the value
 * pointed to by p with the value w, returning the old value.
 *
 * Used for locking closures during updates (see lockClosure()) and
 * the MVar primops.
 */
EXTERN_INLINE StgWord xchg(StgPtr p, StgWord w);
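
/*
 * A minimal sketch (not the real RTS code; all names here are
 * illustrative assumptions) of how xchg() can serve as a
 * test-and-set spin lock:
 *
 *     #define SPIN_UNLOCKED ((StgWord)0)
 *     #define SPIN_LOCKED   ((StgWord)1)
 *
 *     static void spin_lock (StgPtr p)
 *     {
 *         // atomically store SPIN_LOCKED; if the old value was
 *         // already SPIN_LOCKED, another CPU holds the lock: retry.
 *         while (xchg(p, SPIN_LOCKED) == SPIN_LOCKED) { }
 *     }
 *
 *     static void spin_unlock (StgPtr p)
 *     {
 *         write_barrier();   // commit prior writes before releasing
 *         *p = SPIN_UNLOCKED;
 *     }
 */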

/*
 * Compare-and-swap.  Atomically does this:
 *
 * cas(p,o,n) {
 *    r = *p;
 *    if (r == o) { *p = n };
 *    return r;
 * }
 */
EXTERN_INLINE StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
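
/*
 * A usage sketch (illustrative only, not RTS code): because cas()
 * returns the value it read, a caller can detect whether the swap
 * happened and retry.  For example, an atomic increment:
 *
 *     static void atomic_inc_sketch (StgVolatilePtr p)
 *     {
 *         StgWord old;
 *         do {
 *             old = *p;
 *             // the cas fails, and we loop, if another CPU wrote
 *             // to the word between our read and the cmpxchg
 *         } while (cas(p, old, old+1) != old);
 *     }
 */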

/*
 * Prevents write operations from moving across this call in either
 * direction.
 */
EXTERN_INLINE void write_barrier(void);
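
/*
 * A sketch of the intended publication pattern (the field names are
 * assumptions, not real RTS structures): initialise an object fully,
 * issue the barrier, and only then make the object reachable, so a
 * CPU that sees the new pointer also sees its initialised payload.
 *
 *     obj->field = x;          // 1. initialise the payload
 *     write_barrier();         // 2. payload writes commit first
 *     shared_list->head = obj; // 3. publish the object
 */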

/* ----------------------------------------------------------------------------
   Implementations
   ------------------------------------------------------------------------- */
/*
 * NB: on x86 and x86_64, the xchg instruction is implicitly locked,
 * so we do not need a lock prefix here.
 */
EXTERN_INLINE StgWord
xchg(StgPtr p, StgWord w)
{
    StgWord result;
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    result = w;
    __asm__ __volatile__ (
        "xchg %1,%0"
        :"+r" (result), "+m" (*p)
        : /* no input-only operands */
        : "memory"
        );
#elif powerpc_HOST_ARCH
    __asm__ __volatile__ (
        "1:     lwarx     %0, 0, %2\n"
        "       stwcx.    %1, 0, %2\n"
        "       bne-      1b"
        :"=&r" (result)
        :"r" (w), "r" (p)
        :"cc", "memory"
    );
#elif sparc_HOST_ARCH
    result = w;
    __asm__ __volatile__ (
        "swap %1,%0"
        : "+r" (result), "+m" (*p)
        : /* no input-only operands */
        : "memory"
    );
#elif !defined(WITHSMP)
    result = *p;
    *p = w;
#else
#error xchg() unimplemented on this architecture
#endif
    return result;
}

/*
 * CMPXCHG - the single-word atomic compare-and-exchange instruction.  Used
 * in the STM implementation.
 */
EXTERN_INLINE StgWord
cas(StgVolatilePtr p, StgWord o, StgWord n)
{
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    __asm__ __volatile__ (
        "lock\ncmpxchg %3,%1"
        :"=a"(o), "+m" (*(volatile StgWord *)p)
        :"0" (o), "r" (n)
        :"memory");
    return o;
#elif powerpc_HOST_ARCH
    StgWord result;
    __asm__ __volatile__ (
        "1:     lwarx     %0, 0, %3\n"
        "       cmpw      %0, %1\n"
        "       bne       2f\n"
        "       stwcx.    %2, 0, %3\n"
        "       bne-      1b\n"
        "2:"
        :"=&r" (result)
        :"r" (o), "r" (n), "r" (p)
        :"cc", "memory"
    );
    return result;
#elif sparc_HOST_ARCH
    __asm__ __volatile__ (
        "cas [%1], %2, %0"
        : "+r" (n)
        : "r" (p), "r" (o)
        : "memory"
    );
    return n;
#elif !defined(WITHSMP)
    StgWord result;
    result = *p;
    if (result == o) {
        *p = n;
    }
    return result;
#else
#error cas() unimplemented on this architecture
#endif
}

/*
 * Write barrier - ensure that all preceding writes have happened
 * before all following writes.
 *
 * We need to tell both the compiler AND the CPU about the barrier.
 * This is a brute force solution; better results might be obtained by
 * using volatile type declarations to get fine-grained ordering
 * control in C, and optionally a memory barrier instruction on CPUs
 * that require it (not x86 or x86_64).
 */
EXTERN_INLINE void
write_barrier(void) {
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    __asm__ __volatile__ ("" : : : "memory");
#elif powerpc_HOST_ARCH
    __asm__ __volatile__ ("lwsync" : : : "memory");
#elif sparc_HOST_ARCH
    /* SPARC in TSO mode does not require write/write barriers. */
    __asm__ __volatile__ ("" : : : "memory");
#elif !defined(WITHSMP)
    return;
#else
#error memory barriers unimplemented on this architecture
#endif
}

/* ---------------------------------------------------------------------- */
#else /* !THREADED_RTS */

#define write_barrier() /* nothing */

INLINE_HEADER StgWord
xchg(StgPtr p, StgWord w)
{
    StgWord old = *p;
    *p = w;
    return old;
}

#endif /* !THREADED_RTS */

#endif /* CMINUSMINUS */

#endif /* SMP_H */