THREADED_RTS: use cas() when claiming thunks
[ghc.git] / rts / ThreadPaused.c
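This change makes the threaded RTS claim a thunk for black-holing with an
atomic compare-and-swap on its info pointer, retrying if another thread
changes the header first (see the cas() call and the retry label in
threadPaused below). A minimal, self-contained sketch of that claim-by-CAS
pattern — not GHC code; the plain word type and the GCC __sync builtin
stand in for the RTS's own cas() on the closure header:

#include <stdint.h>

typedef uintptr_t word;

/* Sketch only, not the RTS implementation: try to install
 * 'blackhole_info' in place of '*expected_info'.  Returns 1 if we won
 * the race; otherwise stores the info pointer we actually found in
 * '*expected_info' so the caller can re-inspect the closure (compare
 * the 'retry' label in threadPaused below). */
static inline int
claim_thunk(volatile word *info_ptr, word *expected_info, word blackhole_info)
{
    word seen = __sync_val_compare_and_swap(info_ptr, *expected_info,
                                            blackhole_info);
    if (seen == *expected_info) {
        return 1;              /* we black-holed the thunk */
    }
    *expected_info = seen;     /* another thread changed the header first */
    return 0;
}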
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Tidying up a thread when it stops running
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "Storage.h"
#include "LdvProfile.h"
#include "Updates.h"
#include "RaiseAsync.h"
#include "Trace.h"
#include "RtsFlags.h"

#include <string.h> // for memmove()

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */

struct stack_gap { StgWord gap_size; struct stack_gap *next_gap; };

static void
stackSqueeze(StgTSO *tso, StgPtr bottom)
{
    StgPtr frame;
    rtsBool prev_was_update_frame;
    StgClosure *updatee = NULL;
    StgRetInfoTable *info;
    StgWord current_gap_size;
    struct stack_gap *gap;

    // Stage 1:
    //    Traverse the stack upwards, replacing adjacent update frames
    //    with a single update frame and a "stack gap".  A stack gap
    //    contains two values: the size of the gap, and the distance
    //    to the next gap (or the stack top).
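    //
    //    For example, a run of three adjacent update frames becomes the
    //    topmost frame (whose updatee the other frames' updatees are
    //    redirected to) followed by a gap marker covering the other two
    //    frames; Stage 2 below then slides the live stack over the gaps.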

    frame = tso->sp;

    ASSERT(frame < bottom);

    prev_was_update_frame = rtsFalse;
    current_gap_size = 0;
    gap = (struct stack_gap *) (tso->sp - sizeofW(StgUpdateFrame));

    while (frame < bottom) {

        info = get_ret_itbl((StgClosure *)frame);
        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgUpdateFrame *upd = (StgUpdateFrame *)frame;

            if (prev_was_update_frame) {

                TICK_UPD_SQUEEZED();
                /* wasn't there something about update squeezing and ticky to be
                 * sorted out?  oh yes: we aren't counting each enter properly
                 * in this case.  See the log somewhere.  KSW 1999-04-21
                 *
                 * Check two things: that the two update frames don't point to
                 * the same object, and that the updatee_bypass isn't already an
                 * indirection.  Both of these cases only happen when we're in a
                 * black hole-style loop (and there are multiple update frames
                 * on the stack pointing to the same closure), but they can both
                 * screw us up if we don't check.
                 */
                if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
                    UPD_IND_NOLOCK(upd->updatee, updatee);
                }

                // now mark this update frame as a stack gap.  The gap
                // marker resides in the bottom-most update frame of
                // the series of adjacent frames, and covers all the
                // frames in this series.
                current_gap_size += sizeofW(StgUpdateFrame);
                ((struct stack_gap *)frame)->gap_size = current_gap_size;
                ((struct stack_gap *)frame)->next_gap = gap;

                frame += sizeofW(StgUpdateFrame);
                continue;
            }

            // single update frame, or the topmost update frame in a series
            else {
                prev_was_update_frame = rtsTrue;
                updatee = upd->updatee;
                frame += sizeofW(StgUpdateFrame);
                continue;
            }
        }

        default:
            prev_was_update_frame = rtsFalse;

            // we're not in a gap... check whether this is the end of a gap
            // (an update frame can't be the end of a gap).
            if (current_gap_size != 0) {
                gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
            }
            current_gap_size = 0;

            frame += stack_frame_sizeW((StgClosure *)frame);
            continue;
        }
    }

    if (current_gap_size != 0) {
        gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
    }

    // Now we have a stack with gaps in it, and we have to walk down
    // shoving the stack up to fill in the gaps.  A diagram might
    // help:
    //
    //    +| ********* |
    //     | ********* | <- sp
    //     |           |
    //     |           | <- gap_start
    //     | ......... |                |
    //     | stack_gap | <- gap         | chunk_size
    //     | ......... |                |
    //     | ......... | <- gap_end     v
    //     | ********* |
    //     | ********* |
    //     | ********* |
    //    -| ********* |
    //
    // 'sp'  points to the current top-of-stack
    // 'gap' points to the stack_gap structure inside the gap
    // *****   indicates real stack data
    // .....   indicates gap
    // <empty> indicates unused
    //
    {
        void *sp;
        void *gap_start, *next_gap_start, *gap_end;
        nat chunk_size;

        next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));
        sp = next_gap_start;

        while ((StgPtr)gap > tso->sp) {

            // we're working in *bytes* now...
            gap_start = next_gap_start;
            gap_end = (void*) ((unsigned char*)gap_start - gap->gap_size * sizeof(W_));

            gap = gap->next_gap;
            next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));

            chunk_size = (unsigned char*)gap_end - (unsigned char*)next_gap_start;
            sp -= chunk_size;
            memmove(sp, next_gap_start, chunk_size);
        }

        tso->sp = (StgPtr)sp;
    }
}

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
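/* Lazy black holing: rather than overwriting a thunk with a black hole
 * the moment it is entered, we wait until the thread pauses and then,
 * here, black-hole the updatee of every update frame on the stack.
 * This keeps thunk entry cheap while still letting the GC (and, in the
 * threaded RTS, other threads) see which thunks are under evaluation.
 */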
void
threadPaused(Capability *cap, StgTSO *tso)
{
    StgClosure *frame;
    StgRetInfoTable *info;
    const StgInfoTable *bh_info;
    const StgInfoTable *cur_bh_info USED_IF_THREADS;
    StgClosure *bh;
    StgPtr stack_end;
    nat words_to_squeeze = 0;
    nat weight = 0;
    nat weight_pending = 0;
    rtsBool prev_was_update_frame = rtsFalse;

    // Check to see whether we have threads waiting to raise
    // exceptions, and we're not blocking exceptions, or are blocked
    // interruptibly.  This is important; if a thread is running with
    // TSO_BLOCKEX and becomes blocked interruptibly, this is the only
    // place we ensure that the blocked_exceptions get a chance.
    maybePerformBlockedException (cap, tso);
    if (tso->what_next == ThreadKilled) { return; }

    stack_end = &tso->stack[tso->stack_size];

    frame = (StgClosure *)tso->sp;

    while (1) {
        // If we've already marked this frame, then stop here.
        if (frame->header.info == (StgInfoTable *)&stg_marked_upd_frame_info) {
            goto end;
        }

        info = get_ret_itbl(frame);

        switch (info->i.type) {

        case UPDATE_FRAME:

            SET_INFO(frame, (StgInfoTable *)&stg_marked_upd_frame_info);

            bh = ((StgUpdateFrame *)frame)->updatee;
            bh_info = bh->header.info;

#ifdef THREADED_RTS
        retry:
#endif
            if (closure_flags[INFO_PTR_TO_STRUCT(bh_info)->type] & _IND
                || bh_info == &stg_BLACKHOLE_info) {
                debugTrace(DEBUG_squeeze,
                           "suspending duplicate work: %ld words of stack",
                           (long)((StgPtr)frame - tso->sp));

                // If this closure is already an indirection, then
                // suspend the computation up to this point:
                suspendComputation(cap,tso,(StgPtr)frame);

                // Now drop the update frame, and arrange to return
                // the value to the frame underneath:
                tso->sp = (StgPtr)frame + sizeofW(StgUpdateFrame) - 2;
                tso->sp[1] = (StgWord)bh;
                tso->sp[0] = (W_)&stg_enter_info;
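                // (The marked update frame is recycled in place: its
                // last two words now form a stg_enter frame, so when the
                // thread resumes it simply enters bh -- already an
                // indirection or black hole -- and the value is returned
                // to the frame underneath.)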

                // And continue with threadPaused; there might be
                // yet more computation to suspend.
                threadPaused(cap,tso);
                return;
            }

            if (bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
                debugBelch("Unexpected lazy BHing required at 0x%04lx\n",(long)bh);
#endif
                // zero out the slop so that the sanity checker can tell
                // where the next closure is.
                DEBUG_FILL_SLOP(bh);
#ifdef PROFILING
                // @LDV profiling
                // We pretend that bh is now dead.
                LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
#endif

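                // In the threaded RTS another thread may be claiming (or
                // may already have updated) this thunk, so install the
                // black hole with an atomic compare-and-swap.  If the info
                // pointer has changed since we read bh_info, take the
                // value we actually found and go back to retry: the thunk
                // may now be an indirection or a black hole, in which case
                // we suspend the duplicate work above.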
#ifdef THREADED_RTS
                cur_bh_info = (const StgInfoTable *)
                    cas((StgVolatilePtr)&bh->header.info,
                        (StgWord)bh_info,
                        (StgWord)&stg_BLACKHOLE_info);

                if (cur_bh_info != bh_info) {
                    bh_info = cur_bh_info;
                    goto retry;
                }
#else
                SET_INFO(bh,&stg_BLACKHOLE_info);
#endif

                // We pretend that bh has just been created.
                LDV_RECORD_CREATE(bh);
            }

            frame = (StgClosure *) ((StgUpdateFrame *)frame + 1);
            if (prev_was_update_frame) {
                words_to_squeeze += sizeofW(StgUpdateFrame);
                weight += weight_pending;
                weight_pending = 0;
            }
            prev_was_update_frame = rtsTrue;
            break;

        case STOP_FRAME:
            goto end;

            // normal stack frames; do nothing except advance the pointer
        default:
        {
            nat frame_size = stack_frame_sizeW(frame);
            weight_pending += frame_size;
            frame = (StgClosure *)((StgPtr)frame + frame_size);
            prev_was_update_frame = rtsFalse;
        }
        }
    }

end:
    debugTrace(DEBUG_squeeze,
               "words_to_squeeze: %d, weight: %d, squeeze: %s",
               words_to_squeeze, weight,
               weight < words_to_squeeze ? "YES" : "NO");

    // Should we squeeze or not?  Arbitrary heuristic: we squeeze if
    // the number of words we have to shift down is less than the
    // number of stack words we squeeze away by doing so.
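    // For example, a long run of adjacent update frames with little in
    // between squeezes away many words while shifting few, so we squeeze;
    // a single pair of update frames separated by a large chunk of other
    // frames would force us to shift that whole chunk just to reclaim one
    // frame, so we leave the stack alone.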
    if (RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue &&
        weight < words_to_squeeze) {
        stackSqueeze(tso, (StgPtr)frame);
    }
}