Use message-passing to implement throwTo in the RTS
[ghc.git] / rts / sm / MarkWeak.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Weak pointers and weak-like things in the GC
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "MarkWeak.h"
18 #include "GC.h"
19 #include "GCThread.h"
20 #include "Evac.h"
21 #include "Trace.h"
22 #include "Schedule.h"
23 #include "Weak.h"
24 #include "Storage.h"
25 #include "Threads.h"
26
27 /* -----------------------------------------------------------------------------
28 Weak Pointers
29
30 traverse_weak_ptr_list is called possibly many times during garbage
31 collection. It returns a flag indicating whether it did any work
32 (i.e. called evacuate on any live pointers).
33
34 Invariant: traverse_weak_ptr_list is called when the heap is in an
35 idempotent state. That means that there are no pending
36 evacuate/scavenge operations. This invariant helps the weak
37 pointer code decide which weak pointers are dead - if there are no
38 new live weak pointers, then all the currently unreachable ones are
39 dead.
40
41 For generational GC: we just don't try to finalize weak pointers in
42 older generations than the one we're collecting. This could
43 probably be optimised by keeping per-generation lists of weak
44 pointers, but for a few weak pointers this scheme will work.
45
46 There are three distinct stages to processing weak pointers:
47
48 - weak_stage == WeakPtrs
49
50 We process all the weak pointers whose keys are alive (evacuate
51 their values and finalizers), and repeat until we can find no new
52 live keys. If no live keys are found in this pass, then we
53 evacuate the finalizers of all the dead weak pointers in order to
54 run them.
55
56 - weak_stage == WeakThreads
57
58 Now, we discover which *threads* are still alive. Pointers to
59 threads from the all_threads and main thread lists are the
60 weakest of all: a pointer from the finalizer of a dead weak
61 pointer can keep a thread alive. Any threads found to be unreachable
62 are evacuated and placed on the resurrected_threads list so we
63 can send them a signal later.
64
65 - weak_stage == WeakDone
66
67 No more evacuation is done.
68
69 -------------------------------------------------------------------------- */
70
/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverse_weak_ptr_list() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
static WeakStage weak_stage;

/* Weak pointers: the list captured from weak_ptr_list at the start of
 * this GC (see initWeakForGC()).  After the WeakPtrs stage it doubles
 * as the list of pending finalisers.
 */
StgWeak *old_weak_ptr_list; // also pending finaliser list

// List of threads found to be unreachable (about to become garbage);
// they are kept alive here so we can send them a signal later
// (see the WeakThreads stage discussion above).
StgTSO *resurrected_threads;

// Helpers for the WeakThreads stage of traverseWeakPtrList().
static void resurrectUnreachableThreads (generation *gen);
static rtsBool tidyThreadList (generation *gen);
86
87 void
88 initWeakForGC(void)
89 {
90 old_weak_ptr_list = weak_ptr_list;
91 weak_ptr_list = NULL;
92 weak_stage = WeakPtrs;
93 resurrected_threads = END_TSO_QUEUE;
94 }
95
// Run one pass of the current weak-pointer stage.  Returns rtsTrue if
// it evacuated anything (so the caller must scavenge again and call us
// back), rtsFalse once all stages are complete.  Must only be called
// when the heap is in an idempotent state (no pending evac/scavenge
// work) -- see the invariant discussion at the top of this file.
rtsBool
traverseWeakPtrList(void)
{
    StgWeak *w, **last_w, *next_w;
    StgClosure *new;
    rtsBool flag = rtsFalse;
    const StgInfoTable *info;

    switch (weak_stage) {

    case WeakDone:
        return rtsFalse;

    case WeakPtrs:
        /* doesn't matter where we evacuate values/finalizers to, since
         * these pointers are treated as roots (iff the keys are alive).
         */
        gct->evac_gen = 0;

        last_w = &old_weak_ptr_list;
        for (w = old_weak_ptr_list; w != NULL; w = next_w) {

            /* There might be a DEAD_WEAK on the list if finalizeWeak# was
             * called on a live weak pointer object.  Just remove it.
             */
            if (w->header.info == &stg_DEAD_WEAK_info) {
                next_w = ((StgDeadWeak *)w)->link;
                *last_w = next_w;
                continue;
            }

            info = get_itbl(w);
            switch (info->type) {

            case WEAK:
                /* Now, check whether the key is reachable.
                 */
                new = isAlive(w->key);
                if (new != NULL) {
                    // key is alive: update it to the to-space address
                    w->key = new;
                    // evacuate the value and finalizer
                    evacuate(&w->value);
                    evacuate(&w->finalizer);
                    // remove this weak ptr from the old_weak_ptr list
                    *last_w = w->link;
                    // and put it on the new weak ptr list
                    next_w = w->link;
                    w->link = weak_ptr_list;
                    weak_ptr_list = w;
                    // we did some evacuation, so another scavenge pass
                    // (and another call to us) is needed
                    flag = rtsTrue;

                    debugTrace(DEBUG_weak,
                               "weak pointer still alive at %p -> %p",
                               w, w->key);
                    continue;
                }
                else {
                    // key not (yet) alive: leave w on old_weak_ptr_list
                    last_w = &(w->link);
                    next_w = w->link;
                    continue;
                }

            default:
                barf("traverseWeakPtrList: not WEAK");
            }
        }

        /* If we didn't make any changes, then we can go round and kill all
         * the dead weak pointers.  The old_weak_ptr list is used as a list
         * of pending finalizers later on.
         */
        if (flag == rtsFalse) {
            // everything left on old_weak_ptr_list is dead; keep the
            // finalizers alive so they can be run after GC
            for (w = old_weak_ptr_list; w; w = w->link) {
                evacuate(&w->finalizer);
            }

            // Next, move to the WeakThreads stage after fully
            // scavenging the finalizers we've just evacuated.
            weak_stage = WeakThreads;
        }

        return rtsTrue;

    case WeakThreads:
        /* Now deal with the step->threads lists, which behave somewhat like
         * the weak ptr list.  If we discover any threads that are about to
         * become garbage, we wake them up and administer an exception.
         */
        {
            nat g;

            // Traverse thread lists for generations we collected...
            // ToDo when we have one gen per capability:
            // for (n = 0; n < n_capabilities; n++) {
            //     if (tidyThreadList(&nurseries[n])) {
            //         flag = rtsTrue;
            //     }
            // }
            for (g = 0; g <= N; g++) {
                if (tidyThreadList(&generations[g])) {
                    flag = rtsTrue;
                }
            }

            /* If we evacuated any threads, we need to go back to the scavenger.
             */
            if (flag) return rtsTrue;

            /* And resurrect any threads which were about to become garbage.
             */
            {
                nat g;
                for (g = 0; g <= N; g++) {
                    resurrectUnreachableThreads(&generations[g]);
                }
            }

            /* Finally, we can update the blackhole_queue.  This queue
             * simply strings together TSOs blocked on black holes, it is
             * not intended to keep anything alive.  Hence, we do not follow
             * pointers on the blackhole_queue until now, when we have
             * determined which TSOs are otherwise reachable.  We know at
             * this point that all TSOs have been evacuated, however.
             */
            {
                StgTSO **pt;
                for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->_link)) {
                    // isAlive() just returns the forwarded address here;
                    // all TSOs are guaranteed evacuated by this point,
                    // hence the non-NULL assertion
                    *pt = (StgTSO *)isAlive((StgClosure *)*pt);
                    ASSERT(*pt != NULL);
                }
            }

            weak_stage = WeakDone;  // *now* we're done,
            return rtsTrue;         // but one more round of scavenging, please
        }

    default:
        barf("traverse_weak_ptr_list");
        return rtsTrue;
    }
}
237
238 static void resurrectUnreachableThreads (generation *gen)
239 {
240 StgTSO *t, *tmp, *next;
241
242 for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
243 next = t->global_link;
244
245 // ThreadFinished and ThreadComplete: we have to keep
246 // these on the all_threads list until they
247 // become garbage, because they might get
248 // pending exceptions.
249 switch (t->what_next) {
250 case ThreadKilled:
251 case ThreadComplete:
252 continue;
253 default:
254 tmp = t;
255 evacuate((StgClosure **)&tmp);
256 tmp->global_link = resurrected_threads;
257 resurrected_threads = tmp;
258 }
259 }
260 }
261
// Walk gen->old_threads, the list of threads that were live at the
// start of this GC but have not yet been proven reachable.  Threads
// found alive are unlinked and moved onto the threads list of the
// generation they now live in; the rest stay behind for a later pass
// (resurrectUnreachableThreads).
static rtsBool tidyThreadList (generation *gen)
{
    StgTSO *t, *tmp, *next, **prev;
    rtsBool flag = rtsFalse;

    prev = &gen->old_threads;

    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {

        // isAlive() returns the forwarded (to-space) address if t has
        // already been evacuated this GC, or NULL if it is so far
        // unreachable.
        tmp = (StgTSO *)isAlive((StgClosure *)t);

        if (tmp != NULL) {
            t = tmp;
        }

        ASSERT(get_itbl(t)->type == TSO);
        if (t->what_next == ThreadRelocated) {
            // a relocated TSO is just a forwarding stub: unlink it and
            // follow its _link chain instead.
            next = t->_link;
            *prev = next;
            continue;
        }

        next = t->global_link;

        // if the thread is not masking exceptions but there are
        // pending exceptions on its queue, then something has gone
        // wrong:
        ASSERT(t->blocked_exceptions == END_BLOCKED_EXCEPTIONS_QUEUE
               || (t->flags & TSO_BLOCKEX));

        if (tmp == NULL) {
            // not alive (yet): leave this thread on the
            // old_all_threads list.
            prev = &(t->global_link);
        }
        else {
            // alive: unlink it from old_threads...
            *prev = next;

            // move this thread onto the correct threads list.
            generation *new_gen;
            new_gen = Bdescr((P_)t)->gen;
            t->global_link = new_gen->threads;
            new_gen->threads = t;
        }
    }

    // NOTE(review): flag is never set to rtsTrue anywhere in this
    // function, so the caller's "go back to the scavenger" path can
    // never trigger from here.  Presumably relinking already-evacuated
    // threads needs no further scavenging -- confirm whether the
    // return value is vestigial.
    return flag;
}
311
/* -----------------------------------------------------------------------------
   The blackhole queue

   Threads on this list behave like weak pointers during the normal
   phase of garbage collection: if the blackhole is reachable, then
   the thread is reachable too.
   -------------------------------------------------------------------------- */
// Returns rtsTrue if any thread was evacuated (i.e. more scavenging is
// required), rtsFalse otherwise.
rtsBool
traverseBlackholeQueue (void)
{
    StgTSO *prev, *t, *tmp;
    rtsBool flag;
    nat type;

    flag = rtsFalse;
    prev = NULL;

    for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->_link) {
        // if the thread is not yet alive...
        if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
            // if the closure it is blocked on is either (a) a
            // reachable BLACKHOLE or (b) not a BLACKHOLE, then we
            // make the thread alive.
            if (!isAlive(t->block_info.closure)) {
                type = get_itbl(t->block_info.closure)->type;
                if (type == BLACKHOLE || type == CAF_BLACKHOLE) {
                    // blocked on a dead blackhole: the thread can never
                    // be woken, so leave it dead too.
                    continue;
                }
            }
            // evacuate() updates t in place with its new address.
            evacuate((StgClosure **)&t);
            if (prev) {
                prev->_link = t;
            } else {
                blackhole_queue = t;
            }
            // no write barrier when on the blackhole queue,
            // because we traverse the whole queue on every GC.
            flag = rtsTrue;
        }
    }
    return flag;
}
354
/* -----------------------------------------------------------------------------
   Evacuate every weak pointer object on the weak_ptr_list, and update
   the link fields.

   ToDo: with a lot of weak pointers, this will be expensive.  We
   should have a per-GC weak pointer list, just like threads.
   -------------------------------------------------------------------------- */

void
markWeakPtrList ( void )
{
    StgWeak *w, **last_w;

    last_w = &weak_ptr_list;
    for (w = weak_ptr_list; w; w = w->link) {
        // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
        ASSERT(IS_FORWARDING_PTR(w->header.info)
               || w->header.info == &stg_DEAD_WEAK_info
               || get_itbl(w)->type == WEAK);
        // evacuating through last_w overwrites the previous object's
        // link field (or weak_ptr_list itself) with the new address...
        evacuate((StgClosure **)last_w);
        // ...so re-read w, which may have moved.
        w = *last_w;
        // advance last_w to the link field of the (possibly moved)
        // object; note that DEAD_WEAK has a different layout.  The
        // for-loop increment then reads w->link == *last_w, keeping the
        // traversal consistent.
        if (w->header.info == &stg_DEAD_WEAK_info) {
            last_w = &(((StgDeadWeak*)w)->link);
        } else {
            last_w = &(w->link);
        }
    }
}
383