When in sanity mode, un-zero malloc'd memory; fix uninitialized memory bugs.
[ghc.git] / rts / sm / MarkWeak.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Weak pointers and weak-like things in the GC
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "MarkWeak.h"
18 #include "GC.h"
19 #include "GCThread.h"
20 #include "GCTDecl.h"
21 #include "Evac.h"
22 #include "Trace.h"
23 #include "Schedule.h"
24 #include "Weak.h"
25 #include "Storage.h"
26 #include "Threads.h"
27
28 #include "sm/GCUtils.h"
29 #include "sm/MarkWeak.h"
30 #include "sm/Sanity.h"
31
32 /* -----------------------------------------------------------------------------
33 Weak Pointers
34
35 traverse_weak_ptr_list is called possibly many times during garbage
36 collection. It returns a flag indicating whether it did any work
37 (i.e. called evacuate on any live pointers).
38
39 Invariant: traverse_weak_ptr_list is called when the heap is in an
40 idempotent state. That means that there are no pending
41 evacuate/scavenge operations. This invariant helps the weak
42 pointer code decide which weak pointers are dead - if there are no
43 new live weak pointers, then all the currently unreachable ones are
44 dead.
45
46 For generational GC: we don't try to finalize weak pointers in
47 older generations than the one we're collecting.
48
49 There are three distinct stages to processing weak pointers:
50
51 - weak_stage == WeakPtrs
52
 53          We process all the weak pointers whose keys are alive (evacuate
54 their values and finalizers), and repeat until we can find no new
55 live keys. If no live keys are found in this pass, then we
56 evacuate the finalizers of all the dead weak pointers in order to
57 run them.
58
59 - weak_stage == WeakThreads
60
61 Now, we discover which *threads* are still alive. Pointers to
62 threads from the all_threads and main thread lists are the
 63          weakest of all: a pointer from the finalizer of a dead weak
64 pointer can keep a thread alive. Any threads found to be unreachable
65 are evacuated and placed on the resurrected_threads list so we
66 can send them a signal later.
67
68 - weak_stage == WeakDone
69
70 No more evacuation is done.
71
72 -------------------------------------------------------------------------- */
73
74 /* Which stage of processing various kinds of weak pointer are we at?
75 * (see traverse_weak_ptr_list() below for discussion).
76 */
77 typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
78 static WeakStage weak_stage;
79
80 // List of weak pointers whose key is dead
81 StgWeak *dead_weak_ptr_list;
82
83 // List of threads found to be unreachable
84 StgTSO *resurrected_threads;
85
86 static void collectDeadWeakPtrs (generation *gen);
87 static rtsBool tidyWeakList (generation *gen);
88 static rtsBool resurrectUnreachableThreads (generation *gen);
89 static void tidyThreadList (generation *gen);
90
91 void
92 initWeakForGC(void)
93 {
94 uint32_t g;
95
96 for (g = 0; g <= N; g++) {
97 generation *gen = &generations[g];
98 gen->old_weak_ptr_list = gen->weak_ptr_list;
99 gen->weak_ptr_list = NULL;
100 }
101
102 weak_stage = WeakThreads;
103 dead_weak_ptr_list = NULL;
104 resurrected_threads = END_TSO_QUEUE;
105 }
106
/* Run one pass of the current weak-pointer processing stage.
 *
 * Returns rtsTrue if anything was evacuated during this pass, in which
 * case the caller must scavenge thoroughly and call back in; returns
 * rtsFalse only once all stages are complete (weak_stage == WeakDone).
 *
 * Stage order is WeakThreads -> WeakPtrs -> WeakDone (see the comment
 * block at the top of this file); the WeakThreads case deliberately
 * falls through into WeakPtrs when it makes no progress.
 */
rtsBool
traverseWeakPtrList(void)
{
    rtsBool flag = rtsFalse;

    switch (weak_stage) {

    case WeakDone:
        return rtsFalse;

    case WeakThreads:
        /* Now deal with the gen->threads lists, which behave somewhat like
         * the weak ptr list.  If we discover any threads that are about to
         * become garbage, we wake them up and administer an exception.
         */
    {
        uint32_t g;

        for (g = 0; g <= N; g++) {
            tidyThreadList(&generations[g]);
        }

        // Use weak pointer relationships (value is reachable if
        // key is reachable):
        for (g = 0; g <= N; g++) {
            if (tidyWeakList(&generations[g])) {
                flag = rtsTrue;
            }
        }

        // if we evacuated anything new, we must scavenge thoroughly
        // before we can determine which threads are unreachable.
        if (flag) return rtsTrue;

        // Resurrect any threads which were unreachable
        for (g = 0; g <= N; g++) {
            if (resurrectUnreachableThreads(&generations[g])) {
                flag = rtsTrue;
            }
        }

        // Next, move to the WeakPtrs stage after fully
        // scavenging the finalizers we've just evacuated.
        weak_stage = WeakPtrs;

        // if we evacuated anything new, we must scavenge thoroughly
        // before entering the WeakPtrs stage.
        if (flag) return rtsTrue;

        // otherwise, fall through...
    }

    case WeakPtrs:
    {
        uint32_t g;

        // resurrecting threads might have made more weak pointers
        // alive, so traverse those lists again:
        for (g = 0; g <= N; g++) {
            if (tidyWeakList(&generations[g])) {
                flag = rtsTrue;
            }
        }

        /* If we didn't make any changes, then we can go round and kill all
         * the dead weak pointers.  The dead_weak_ptr list is used as a list
         * of pending finalizers later on.
         */
        if (flag == rtsFalse) {
            for (g = 0; g <= N; g++) {
                collectDeadWeakPtrs(&generations[g]);
            }

            weak_stage = WeakDone;  // *now* we're done,
        }

        return rtsTrue;         // but one more round of scavenging, please
    }

    default:
        barf("traverse_weak_ptr_list");
        return rtsTrue;
    }
}
191
192 static void collectDeadWeakPtrs (generation *gen)
193 {
194 StgWeak *w, *next_w;
195 for (w = gen->old_weak_ptr_list; w != NULL; w = next_w) {
196 // If we have C finalizers, keep the value alive for this GC.
197 // See Note [MallocPtr finalizers] in GHC.ForeignPtr, and #10904
198 if (w->cfinalizers != &stg_NO_FINALIZER_closure) {
199 evacuate(&w->value);
200 }
201 evacuate(&w->finalizer);
202 next_w = w->link;
203 w->link = dead_weak_ptr_list;
204 dead_weak_ptr_list = w;
205 }
206 }
207
208 static rtsBool resurrectUnreachableThreads (generation *gen)
209 {
210 StgTSO *t, *tmp, *next;
211 rtsBool flag = rtsFalse;
212
213 for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
214 next = t->global_link;
215
216 // ThreadFinished and ThreadComplete: we have to keep
217 // these on the all_threads list until they
218 // become garbage, because they might get
219 // pending exceptions.
220 switch (t->what_next) {
221 case ThreadKilled:
222 case ThreadComplete:
223 continue;
224 default:
225 tmp = t;
226 evacuate((StgClosure **)&tmp);
227 tmp->global_link = resurrected_threads;
228 resurrected_threads = tmp;
229 flag = rtsTrue;
230 }
231 }
232 return flag;
233 }
234
/* Walk gen->old_weak_ptr_list, moving weak pointers whose keys are
 * reachable onto the weak_ptr_list of whichever generation the weak
 * object itself now resides in.  Entries whose keys are (so far) dead
 * stay on old_weak_ptr_list; they become finalizable if no later pass
 * revives them.  DEAD_WEAK entries (from finalizeWeak#) are dropped.
 *
 * Returns rtsTrue if anything new was evacuated, meaning the caller
 * must scavenge and call again to reach a fixed point.
 */
static rtsBool tidyWeakList(generation *gen)
{
    StgWeak *w, **last_w, *next_w;
    const StgInfoTable *info;
    StgClosure *new;
    rtsBool flag = rtsFalse;
    last_w = &gen->old_weak_ptr_list;
    for (w = gen->old_weak_ptr_list; w != NULL; w = next_w) {

        /* There might be a DEAD_WEAK on the list if finalizeWeak# was
         * called on a live weak pointer object.  Just remove it.
         */
        if (w->header.info == &stg_DEAD_WEAK_info) {
            next_w = w->link;
            *last_w = next_w;
            continue;
        }

        info = get_itbl((StgClosure *)w);
        switch (info->type) {

        case WEAK:
            /* Now, check whether the key is reachable.
             */
            new = isAlive(w->key);
            if (new != NULL) {
                generation *new_gen;

                // isAlive returned the key's new address; update it.
                w->key = new;

                // Find out which generation this weak ptr is in, and
                // move it onto the weak ptr list of that generation.

                new_gen = Bdescr((P_)w)->gen;
                gct->evac_gen_no = new_gen->no;
                gct->failed_to_evac = rtsFalse;

                // evacuate the fields of the weak ptr
                scavengeLiveWeak(w);

                // failed_to_evac means a field points into a younger
                // generation, so the WEAK must go on the mutable list.
                if (gct->failed_to_evac) {
                    debugTrace(DEBUG_weak,
                               "putting weak pointer %p into mutable list",
                               w);
                    gct->failed_to_evac = rtsFalse;
                    recordMutableGen_GC((StgClosure *)w, new_gen->no);
                }

                // remove this weak ptr from the old_weak_ptr list
                *last_w = w->link;
                next_w = w->link;

                // and put it on the correct weak ptr list.
                w->link = new_gen->weak_ptr_list;
                new_gen->weak_ptr_list = w;
                flag = rtsTrue;

                if (gen->no != new_gen->no) {
                    debugTrace(DEBUG_weak,
                               "moving weak pointer %p from %d to %d",
                               w, gen->no, new_gen->no);
                }


                debugTrace(DEBUG_weak,
                           "weak pointer still alive at %p -> %p",
                           w, w->key);
                continue;
            }
            else {
                // key is dead (so far): leave the entry on the old
                // list and advance last_w past it.
                last_w = &(w->link);
                next_w = w->link;
                continue;
            }

        default:
            barf("tidyWeakList: not WEAK: %d, %p", info->type, w);
        }
    }

    return flag;
}
317
/* Partition gen->old_threads: threads found reachable are unlinked and
 * moved onto the threads list of the generation they were evacuated
 * into; unreachable threads are left on old_threads for a later pass
 * (resurrectUnreachableThreads).
 */
static void tidyThreadList (generation *gen)
{
    StgTSO *t, *tmp, *next, **prev;

    prev = &gen->old_threads;

    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {

        // isAlive returns the thread's new address if it has been
        // evacuated, or NULL if it is (so far) unreachable.
        tmp = (StgTSO *)isAlive((StgClosure *)t);

        if (tmp != NULL) {
            t = tmp;
        }

        ASSERT(get_itbl((StgClosure *)t)->type == TSO);
        next = t->global_link;

        // if the thread is not masking exceptions but there are
        // pending exceptions on its queue, then something has gone
        // wrong.  However, pending exceptions are OK if there is an
        // FFI call.
        ASSERT(t->blocked_exceptions == END_BLOCKED_EXCEPTIONS_QUEUE
               || t->why_blocked == BlockedOnCCall
               || t->why_blocked == BlockedOnCCall_Interruptible
               || (t->flags & TSO_BLOCKEX));

        if (tmp == NULL) {
            // not alive (yet): leave this thread on the
            // old_all_threads list.
            prev = &(t->global_link);
        }
        else {
            // alive: unlink from old_threads (prev still points at the
            // previous link field, since we didn't advance it).
            *prev = next;

            // move this thread onto the correct threads list.
            generation *new_gen;
            new_gen = Bdescr((P_)t)->gen;
            t->global_link = new_gen->threads;
            new_gen->threads = t;
        }
    }
}
361
#ifdef DEBUG
/* Sanity-check a capability-local weak pointer list: every element must
 * be a WEAK (or DEAD_WEAK) closure, and tl, when non-NULL, must be the
 * last element reached from hd.
 */
static void checkWeakPtrSanity(StgWeak *hd, StgWeak *tl)
{
    StgWeak *w, *prev = NULL;   // stays NULL if the list is empty
    for (w = hd; w != NULL; prev = w, w = w->link) {
        ASSERT(INFO_PTR_TO_STRUCT(UNTAG_CLOSURE((StgClosure*)w)->header.info)->type == WEAK
            || UNTAG_CLOSURE((StgClosure*)w)->header.info == &stg_DEAD_WEAK_info);
        checkClosure((StgClosure*)w);
    }
    if (tl != NULL) {
        // Previously, an empty list (hd == NULL) with a non-NULL tl read
        // `prev` uninitialized here — undefined behavior.  With prev
        // initialized to NULL, that inconsistent pairing now fails the
        // ASSERT deterministically instead.
        ASSERT(prev == tl);
    }
}
#endif
376
377 void collectFreshWeakPtrs()
378 {
379 uint32_t i;
380 generation *gen = &generations[0];
381 // move recently allocated weak_ptr_list to the old list as well
382 for (i = 0; i < n_capabilities; i++) {
383 Capability *cap = capabilities[i];
384 if (cap->weak_ptr_list_tl != NULL) {
385 IF_DEBUG(sanity, checkWeakPtrSanity(cap->weak_ptr_list_hd, cap->weak_ptr_list_tl));
386 cap->weak_ptr_list_tl->link = gen->weak_ptr_list;
387 gen->weak_ptr_list = cap->weak_ptr_list_hd;
388 cap->weak_ptr_list_tl = NULL;
389 cap->weak_ptr_list_hd = NULL;
390 } else {
391 ASSERT(cap->weak_ptr_list_hd == NULL);
392 }
393 }
394 }
395
396 /* -----------------------------------------------------------------------------
397 Evacuate every weak pointer object on the weak_ptr_list, and update
398 the link fields.
399 -------------------------------------------------------------------------- */
400
/* Evacuate every weak pointer object on each collected generation's
 * weak_ptr_list, and update the link fields to point at the new copies.
 */
void
markWeakPtrList ( void )
{
    uint32_t g;

    for (g = 0; g <= N; g++) {
        generation *gen = &generations[g];
        StgWeak *w, **last_w;

        last_w = &gen->weak_ptr_list;
        for (w = gen->weak_ptr_list; w != NULL; w = w->link) {
            // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here

#ifdef DEBUG
            {   // careful to do this assertion only reading the info ptr
                // once, because during parallel GC it might change under our feet.
                const StgInfoTable *info;
                info = w->header.info;
                ASSERT(IS_FORWARDING_PTR(info)
                       || info == &stg_DEAD_WEAK_info
                       || INFO_PTR_TO_STRUCT(info)->type == WEAK);
            }
#endif

            // evacuate updates *last_w in place; re-read w from there
            // before following its (possibly relocated) link field.
            evacuate((StgClosure **)last_w);
            w = *last_w;
            last_w = &(w->link);
        }
    }
}
431
432 /* -----------------------------------------------------------------------------
433 Fully scavenge a known-to-be-alive weak pointer.
434
435 In scavenge_block, we only partially scavenge a weak pointer because it may
436 turn out to be dead. This function should be called when we decide that the
437 weak pointer is alive after this GC.
438 -------------------------------------------------------------------------- */
439
/* Fully scavenge a known-to-be-alive weak pointer by evacuating all
 * four of its pointer fields.  scavenge_block only partially scavenges
 * a WEAK (it may yet turn out to be dead); call this once the weak
 * pointer is known to survive this GC.
 */
void
scavengeLiveWeak(StgWeak *w)
{
    evacuate(&w->value);
    evacuate(&w->key);
    evacuate(&w->finalizer);
    evacuate(&w->cfinalizers);
}