rts/sm/MarkWeak.c
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Weak pointers and weak-like things in the GC
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "MarkWeak.h"
#include "GC.h"
#include "GCThread.h"
#include "GCTDecl.h"
#include "Evac.h"
#include "Trace.h"
#include "Schedule.h"
#include "Weak.h"
#include "Storage.h"
#include "Threads.h"

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverseWeakPtrList is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverseWeakPtrList is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.

   There are three distinct stages to processing weak pointers:

   - weak_stage == WeakThreads

     First, we discover which *threads* are still alive.  Pointers to
     threads from the all_threads and main thread lists are the
     weakest of all: a pointer from the finalizer of a dead weak
     pointer can keep a thread alive.  Any threads found to be unreachable
     are evacuated and placed on the resurrected_threads list so we
     can send them a signal later.

   - weak_stage == WeakPtrs

     Next, we process all the weak pointers whose keys are alive (evacuate
     their values and finalizers), and repeat until we can find no new
     live keys.  If no live keys are found in this pass, then we
     evacuate the finalizers of all the dead weak pointers in order to
     run them.

   - weak_stage == WeakDone

     No more evacuation is done.

   -------------------------------------------------------------------------- */
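
/* A rough sketch of how these entry points are expected to fit together over
 * one GC cycle.  The real driver lives in GC.c; the loop below is only an
 * illustration of the calling convention described above, not a copy of that
 * code:
 *
 *     markWeakPtrList();            // keep the Weak objects themselves alive
 *     initWeakForGC();              // move weak_ptr_list aside, reset state
 *
 *     ... evacuate roots, scavenge until no work remains ...
 *
 *     while (traverseWeakPtrList()) {
 *         // something new was evacuated: scavenge again before retrying
 *     }
 *
 *     // dead_weak_ptr_list and resurrected_threads are now ready to be
 *     // handed to the finalizer/thread-resurrection machinery at the end
 *     // of the GC (scheduleFinalizers() and resurrectThreads() elsewhere
 *     // in the RTS).
 */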

/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverseWeakPtrList() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
static WeakStage weak_stage;

// List of weak pointers whose key is dead
StgWeak *dead_weak_ptr_list;

// List of threads found to be unreachable
StgTSO *resurrected_threads;

static void collectDeadWeakPtrs (generation *gen);
static rtsBool tidyWeakList (generation *gen);
static rtsBool resurrectUnreachableThreads (generation *gen);
static void tidyThreadList (generation *gen);

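/* Move each collected generation's weak_ptr_list onto its old_weak_ptr_list
 * and reset the per-GC state (weak_stage, dead_weak_ptr_list,
 * resurrected_threads).  tidyWeakList() will move the live weak pointers back
 * onto the appropriate weak_ptr_list; whatever remains on old_weak_ptr_list
 * at the end is dead.
 */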
void
initWeakForGC(void)
{
    nat g;

    for (g = 0; g <= N; g++) {
        generation *gen = &generations[g];
        gen->old_weak_ptr_list = gen->weak_ptr_list;
        gen->weak_ptr_list = NULL;
    }

    weak_stage = WeakThreads;
    dead_weak_ptr_list = NULL;
    resurrected_threads = END_TSO_QUEUE;
}

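/* Called repeatedly by the GC between scavenging passes.  Returns rtsTrue if
 * the caller needs to do another round of scavenging and then call this
 * function again; returns rtsFalse once all weak pointers and threads have
 * been dealt with.
 */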
rtsBool
traverseWeakPtrList(void)
{
    rtsBool flag = rtsFalse;

    switch (weak_stage) {

    case WeakDone:
        return rtsFalse;

    case WeakThreads:
        /* Now deal with the gen->threads lists, which behave somewhat like
         * the weak ptr list.  If we discover any threads that are about to
         * become garbage, we wake them up and administer an exception.
         */
    {
        nat g;

        for (g = 0; g <= N; g++) {
            tidyThreadList(&generations[g]);
        }

        // Use weak pointer relationships (value is reachable if
        // key is reachable):
        for (g = 0; g <= N; g++) {
            if (tidyWeakList(&generations[g])) {
                flag = rtsTrue;
            }
        }

        // if we evacuated anything new, we must scavenge thoroughly
        // before we can determine which threads are unreachable.
        if (flag) return rtsTrue;

        // Resurrect any threads which were unreachable
        for (g = 0; g <= N; g++) {
            if (resurrectUnreachableThreads(&generations[g])) {
                flag = rtsTrue;
            }
        }

        // Next, move to the WeakPtrs stage after fully
        // scavenging the finalizers we've just evacuated.
        weak_stage = WeakPtrs;

        // if we evacuated anything new, we must scavenge thoroughly
        // before entering the WeakPtrs stage.
        if (flag) return rtsTrue;

        // otherwise, fall through...
    }

    case WeakPtrs:
    {
        nat g;

        // resurrecting threads might have made more weak pointers
        // alive, so traverse those lists again:
        for (g = 0; g <= N; g++) {
            if (tidyWeakList(&generations[g])) {
                flag = rtsTrue;
            }
        }

        /* If we didn't make any changes, then we can go round and kill all
         * the dead weak pointers.  The dead_weak_ptr list is used as a list
         * of pending finalizers later on.
         */
        if (flag == rtsFalse) {
            for (g = 0; g <= N; g++) {
                collectDeadWeakPtrs(&generations[g]);
            }

            weak_stage = WeakDone;  // *now* we're done,
        }

        return rtsTrue;             // but one more round of scavenging, please
    }

    default:
        barf("traverse_weak_ptr_list");
        return rtsTrue;
    }
}

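/* Everything left on gen->old_weak_ptr_list at this point has a dead key:
 * evacuate each finalizer (so that it survives to be run) and move the weak
 * pointer onto the global dead_weak_ptr_list, which becomes the list of
 * pending finalizers.
 */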
static void collectDeadWeakPtrs (generation *gen)
{
    StgWeak *w, *next_w;
    for (w = gen->old_weak_ptr_list; w != NULL; w = next_w) {
        evacuate(&w->finalizer);
        next_w = w->link;
        w->link = dead_weak_ptr_list;
        dead_weak_ptr_list = w;
    }
}

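/* Any thread still on gen->old_threads at this point is unreachable from the
 * roots.  Threads that have already finished (ThreadKilled/ThreadComplete)
 * are left to become garbage; the rest are evacuated and put on the
 * resurrected_threads list so that they can be sent an exception later.
 * Returns rtsTrue if anything was evacuated.
 */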
static rtsBool resurrectUnreachableThreads (generation *gen)
{
    StgTSO *t, *tmp, *next;
    rtsBool flag = rtsFalse;

    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
        next = t->global_link;

        // ThreadKilled and ThreadComplete: we have to keep
        // these on the all_threads list until they
        // become garbage, because they might get
        // pending exceptions.
        switch (t->what_next) {
        case ThreadKilled:
        case ThreadComplete:
            continue;
        default:
            tmp = t;
            evacuate((StgClosure **)&tmp);
            tmp->global_link = resurrected_threads;
            resurrected_threads = tmp;
            flag = rtsTrue;
        }
    }
    return flag;
}

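/* Walk gen->old_weak_ptr_list looking for weak pointers whose keys are now
 * known to be reachable.  Each one found has its value and finalizer
 * evacuated and is moved onto the weak_ptr_list of the generation the weak
 * object itself now lives in; the others stay on old_weak_ptr_list for a
 * later pass.  Returns rtsTrue if anything new was evacuated.
 */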
static rtsBool tidyWeakList(generation *gen)
{
    StgWeak *w, **last_w, *next_w;
    const StgInfoTable *info;
    StgClosure *new;
    rtsBool flag = rtsFalse;
    last_w = &gen->old_weak_ptr_list;
    for (w = gen->old_weak_ptr_list; w != NULL; w = next_w) {

        /* There might be a DEAD_WEAK on the list if finalizeWeak# was
         * called on a live weak pointer object.  Just remove it.
         * NB. a DEAD_WEAK has a different layout from a WEAK, so its
         * link field is at a different offset: use the StgDeadWeak view.
         */
        if (w->header.info == &stg_DEAD_WEAK_info) {
            next_w = ((StgDeadWeak *)w)->link;
            *last_w = next_w;
            continue;
        }

        info = get_itbl((StgClosure *)w);
        switch (info->type) {

        case WEAK:
            /* Now, check whether the key is reachable.
             */
            new = isAlive(w->key);
            if (new != NULL) {
                generation *new_gen;

                w->key = new;

                // Find out which generation this weak ptr is in, and
                // move it onto the weak ptr list of that generation.

                new_gen = Bdescr((P_)w)->gen;
                gct->evac_gen_no = new_gen->no;

                // evacuate the value and finalizer
                evacuate(&w->value);
                evacuate(&w->finalizer);
                // remove this weak ptr from the old_weak_ptr list
                *last_w = w->link;
                next_w = w->link;

                // and put it on the correct weak ptr list.
                w->link = new_gen->weak_ptr_list;
                new_gen->weak_ptr_list = w;
                flag = rtsTrue;

                if (gen->no != new_gen->no) {
                    debugTrace(DEBUG_weak,
                               "moving weak pointer %p from %d to %d",
                               w, gen->no, new_gen->no);
                }

                debugTrace(DEBUG_weak,
                           "weak pointer still alive at %p -> %p",
                           w, w->key);
                continue;
            }
            else {
                last_w = &(w->link);
                next_w = w->link;
                continue;
            }

        default:
            barf("tidyWeakList: not WEAK: %d, %p", info->type, w);
        }
    }

    return flag;
}

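/* Walk gen->old_threads.  Threads found to be alive are unlinked and moved
 * onto the threads list of the generation they now live in; threads not (yet)
 * known to be alive are left on old_threads, either for a later pass or for
 * resurrectUnreachableThreads().
 */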
static void tidyThreadList (generation *gen)
{
    StgTSO *t, *tmp, *next, **prev;

    prev = &gen->old_threads;

    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {

        tmp = (StgTSO *)isAlive((StgClosure *)t);

        if (tmp != NULL) {
            t = tmp;
        }

        ASSERT(get_itbl((StgClosure *)t)->type == TSO);
        next = t->global_link;

        // if the thread is not masking exceptions but there are
        // pending exceptions on its queue, then something has gone
        // wrong.  However, pending exceptions are OK if there is an
        // FFI call.
        ASSERT(t->blocked_exceptions == END_BLOCKED_EXCEPTIONS_QUEUE
               || t->why_blocked == BlockedOnCCall
               || t->why_blocked == BlockedOnCCall_Interruptible
               || (t->flags & TSO_BLOCKEX));

        if (tmp == NULL) {
            // not alive (yet): leave this thread on the
            // old_threads list.
            prev = &(t->global_link);
        }
        else {
            // alive
            *prev = next;

            // move this thread onto the correct threads list.
            generation *new_gen;
            new_gen = Bdescr((P_)t)->gen;
            t->global_link = new_gen->threads;
            new_gen->threads = t;
        }
    }
}

/* -----------------------------------------------------------------------------
   Evacuate every weak pointer object on the weak_ptr_list, and update
   the link fields.
   -------------------------------------------------------------------------- */

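/* This is expected to be called once, near the beginning of a GC cycle and
 * before initWeakForGC() moves the weak lists aside (its caller is expected
 * to be GarbageCollect() in GC.c); otherwise weak_ptr_list would already be
 * empty and there would be nothing to mark.
 */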
void
markWeakPtrList ( void )
{
    nat g;

    for (g = 0; g <= N; g++) {
        generation *gen = &generations[g];
        StgWeak *w, **last_w;

        last_w = &gen->weak_ptr_list;
        // follow the list through last_w rather than w->link, because the
        // link field of a DEAD_WEAK lives at a different offset (see below)
        for (w = gen->weak_ptr_list; w != NULL; w = *last_w) {
            // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here

#ifdef DEBUG
            {   // careful to do this assertion only reading the info ptr
                // once, because during parallel GC it might change under our feet.
                const StgInfoTable *info;
                info = w->header.info;
                ASSERT(IS_FORWARDING_PTR(info)
                       || info == &stg_DEAD_WEAK_info
                       || INFO_PTR_TO_STRUCT(info)->type == WEAK);
            }
#endif

            evacuate((StgClosure **)last_w);
            w = *last_w;
            if (w->header.info == &stg_DEAD_WEAK_info) {
                // a DEAD_WEAK is just a header plus a link field, so its
                // link is not where StgWeak's link field would be
                last_w = &(((StgDeadWeak *)w)->link);
            } else {
                last_w = &(w->link);
            }
        }
    }
}