/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Weak pointers and weak-like things in the GC
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "MarkWeak.h"
#include "GC.h"
#include "GCThread.h"
#include "GCTDecl.h"
#include "Evac.h"
#include "Trace.h"
#include "Schedule.h"
#include "Weak.h"
#include "Storage.h"
#include "Threads.h"

#include "sm/Sanity.h"

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverseWeakPtrList is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverseWeakPtrList is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we don't try to finalize weak pointers in
   older generations than the one we're collecting.

   There are three distinct stages to processing weak pointers, run in
   this order by traverseWeakPtrList below:

   - weak_stage == WeakThreads

     We discover which *threads* are still alive.  Pointers to threads
     from the per-generation thread lists are the weakest of all: a
     pointer from the finalizer of a dead weak pointer can keep a
     thread alive.  Any threads found to be unreachable are evacuated
     and placed on the resurrected_threads list so we can send them a
     signal later.

   - weak_stage == WeakPtrs

     We process all the weak pointers whose keys are alive (evacuate
     their values and finalizers), and repeat until we can find no new
     live keys.  If no live keys are found in this pass, then we
     evacuate the finalizers of all the dead weak pointers in order to
     run them.

   - weak_stage == WeakDone

     No more evacuation is done.

   -------------------------------------------------------------------------- */
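/* Stage transitions, as driven by traverseWeakPtrList() below:

       WeakThreads --> WeakPtrs --> WeakDone

   initWeakForGC() puts the state machine back into WeakThreads at the
   start of a collection.  traverseWeakPtrList() advances it, returning
   rtsTrue whenever the caller must do another round of scavenging
   before calling it again, and rtsFalse once WeakDone has been reached
   and there is nothing left to do.
 */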

/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverseWeakPtrList() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
static WeakStage weak_stage;

// List of weak pointers whose key is dead
StgWeak *dead_weak_ptr_list;

// List of threads found to be unreachable
StgTSO *resurrected_threads;

static void collectDeadWeakPtrs (generation *gen);
static rtsBool tidyWeakList (generation *gen);
static rtsBool resurrectUnreachableThreads (generation *gen);
static void tidyThreadList (generation *gen);
void
initWeakForGC(void)
{
    nat g;

    for (g = 0; g <= N; g++) {
        generation *gen = &generations[g];
        gen->old_weak_ptr_list = gen->weak_ptr_list;
        gen->weak_ptr_list = NULL;
    }

    weak_stage = WeakThreads;
    dead_weak_ptr_list = NULL;
    resurrected_threads = END_TSO_QUEUE;
}

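/* One step of the weak-pointer state machine described above.  A rough
 * sketch of how the GC driver is expected to call it (modelled on the
 * scavenge loop in GC.c; not a verbatim copy):
 *
 *     for (;;) {
 *         scavenge_until_all_done();
 *         if (!traverseWeakPtrList()) break;   // nothing left to do
 *     }
 */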
rtsBool
traverseWeakPtrList(void)
{
    rtsBool flag = rtsFalse;

    switch (weak_stage) {

    case WeakDone:
        return rtsFalse;

    case WeakThreads:
        /* Now deal with the gen->threads lists, which behave somewhat like
         * the weak ptr list.  If we discover any threads that are about to
         * become garbage, we wake them up and administer an exception.
         */
    {
        nat g;

        for (g = 0; g <= N; g++) {
            tidyThreadList(&generations[g]);
        }

        // Use weak pointer relationships (value is reachable if
        // key is reachable):
        for (g = 0; g <= N; g++) {
            if (tidyWeakList(&generations[g])) {
                flag = rtsTrue;
            }
        }

        // if we evacuated anything new, we must scavenge thoroughly
        // before we can determine which threads are unreachable.
        if (flag) return rtsTrue;

        // Resurrect any threads which were unreachable
        for (g = 0; g <= N; g++) {
            if (resurrectUnreachableThreads(&generations[g])) {
                flag = rtsTrue;
            }
        }

        // Next, move to the WeakPtrs stage after fully
        // scavenging the finalizers we've just evacuated.
        weak_stage = WeakPtrs;

        // if we evacuated anything new, we must scavenge thoroughly
        // before entering the WeakPtrs stage.
        if (flag) return rtsTrue;

        // otherwise, fall through...
    }

    case WeakPtrs:
    {
        nat g;

        // resurrecting threads might have made more weak pointers
        // alive, so traverse those lists again:
        for (g = 0; g <= N; g++) {
            if (tidyWeakList(&generations[g])) {
                flag = rtsTrue;
            }
        }

        /* If we didn't make any changes, then we can go round and kill all
         * the dead weak pointers.  The dead_weak_ptr_list is used as a list
         * of pending finalizers later on.
         */
        if (flag == rtsFalse) {
            for (g = 0; g <= N; g++) {
                collectDeadWeakPtrs(&generations[g]);
            }

            weak_stage = WeakDone;  // *now* we're done,
        }

        return rtsTrue;             // but one more round of scavenging, please
    }

    default:
        barf("traverse_weak_ptr_list");
        return rtsTrue;
    }
}

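/* Called once no new live weak-pointer keys can be found: move every weak
 * pointer still on gen->old_weak_ptr_list onto the global
 * dead_weak_ptr_list, evacuating its finalizer (and its value too, if it
 * has C finalizers), so that the finalizers can be run after this GC.
 */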
static void collectDeadWeakPtrs (generation *gen)
{
    StgWeak *w, *next_w;
    for (w = gen->old_weak_ptr_list; w != NULL; w = next_w) {
        // If we have C finalizers, keep the value alive for this GC.
        // See Note [MallocPtr finalizers] in GHC.ForeignPtr, and #10904
        if (w->cfinalizers != &stg_NO_FINALIZER_closure) {
            evacuate(&w->value);
        }
        evacuate(&w->finalizer);
        next_w = w->link;
        w->link = dead_weak_ptr_list;
        dead_weak_ptr_list = w;
    }
}

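/* Any thread still on gen->old_threads at this point is unreachable from
 * the GC roots.  Evacuate it and put it on the global resurrected_threads
 * list so that the scheduler can send it an exception later; threads that
 * have already finished (ThreadKilled/ThreadComplete) are left to become
 * garbage.  Returns rtsTrue if anything was evacuated.
 */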
static rtsBool resurrectUnreachableThreads (generation *gen)
{
    StgTSO *t, *tmp, *next;
    rtsBool flag = rtsFalse;

    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
        next = t->global_link;

        // ThreadKilled and ThreadComplete: we have to keep these on
        // the all_threads list until they become garbage, because
        // they might get pending exceptions.
        switch (t->what_next) {
        case ThreadKilled:
        case ThreadComplete:
            continue;
        default:
            tmp = t;
            evacuate((StgClosure **)&tmp);
            tmp->global_link = resurrected_threads;
            resurrected_threads = tmp;
            flag = rtsTrue;
        }
    }
    return flag;
}

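/* Walk gen->old_weak_ptr_list looking for weak pointers whose key has
 * become reachable.  Each such weak pointer has its value and finalizer
 * evacuated and is moved onto the weak_ptr_list of the generation it now
 * lives in; the rest stay on old_weak_ptr_list for a later pass.
 * Returns rtsTrue if anything was evacuated.
 */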
static rtsBool tidyWeakList(generation *gen)
{
    StgWeak *w, **last_w, *next_w;
    const StgInfoTable *info;
    StgClosure *new;
    rtsBool flag = rtsFalse;
    last_w = &gen->old_weak_ptr_list;
    for (w = gen->old_weak_ptr_list; w != NULL; w = next_w) {

        /* There might be a DEAD_WEAK on the list if finalizeWeak# was
         * called on a live weak pointer object.  Just remove it.
         */
        if (w->header.info == &stg_DEAD_WEAK_info) {
            next_w = w->link;
            *last_w = next_w;
            continue;
        }

        info = get_itbl((StgClosure *)w);
        switch (info->type) {

        case WEAK:
            /* Now, check whether the key is reachable.
             */
            new = isAlive(w->key);
            if (new != NULL) {
                generation *new_gen;

                w->key = new;

                // Find out which generation this weak ptr is in, and
                // move it onto the weak ptr list of that generation.

                new_gen = Bdescr((P_)w)->gen;
                gct->evac_gen_no = new_gen->no;

                // evacuate the value and finalizer
                evacuate(&w->value);
                evacuate(&w->finalizer);
                // remove this weak ptr from the old_weak_ptr list
                *last_w = w->link;
                next_w = w->link;

                // and put it on the correct weak ptr list.
                w->link = new_gen->weak_ptr_list;
                new_gen->weak_ptr_list = w;
                flag = rtsTrue;

                if (gen->no != new_gen->no) {
                    debugTrace(DEBUG_weak,
                               "moving weak pointer %p from %d to %d",
                               w, gen->no, new_gen->no);
                }

                debugTrace(DEBUG_weak,
                           "weak pointer still alive at %p -> %p",
                           w, w->key);
                continue;
            }
            else {
                last_w = &(w->link);
                next_w = w->link;
                continue;
            }

        default:
            barf("tidyWeakList: not WEAK: %d, %p", info->type, w);
        }
    }

    return flag;
}

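/* Partition gen->old_threads: threads found to be alive are unlinked and
 * moved onto the threads list of the generation they were evacuated to;
 * the rest are left on old_threads for resurrectUnreachableThreads to
 * deal with.
 */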
static void tidyThreadList (generation *gen)
{
    StgTSO *t, *tmp, *next, **prev;

    prev = &gen->old_threads;

    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {

        tmp = (StgTSO *)isAlive((StgClosure *)t);

        if (tmp != NULL) {
            t = tmp;
        }

        ASSERT(get_itbl((StgClosure *)t)->type == TSO);
        next = t->global_link;

        // if the thread is not masking exceptions but there are
        // pending exceptions on its queue, then something has gone
        // wrong.  However, pending exceptions are OK if there is an
        // FFI call.
        ASSERT(t->blocked_exceptions == END_BLOCKED_EXCEPTIONS_QUEUE
               || t->why_blocked == BlockedOnCCall
               || t->why_blocked == BlockedOnCCall_Interruptible
               || (t->flags & TSO_BLOCKEX));

        if (tmp == NULL) {
            // not alive (yet): leave this thread on the
            // old_threads list.
            prev = &(t->global_link);
        }
        else {
            // alive
            *prev = next;

            // move this thread onto the correct threads list.
            generation *new_gen;
            new_gen = Bdescr((P_)t)->gen;
            t->global_link = new_gen->threads;
            new_gen->threads = t;
        }
    }
}

#ifdef DEBUG
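/* Sanity check (DEBUG only): every object on a capability-local weak
 * pointer list must be a WEAK (or DEAD_WEAK) closure, and tl must really
 * be the last cell of the list headed by hd.
 */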
static void checkWeakPtrSanity(StgWeak *hd, StgWeak *tl)
{
    StgWeak *w, *prev;
    prev = NULL;  // so the assertion below is well-defined when hd == NULL
    for (w = hd; w != NULL; prev = w, w = w->link) {
        ASSERT(INFO_PTR_TO_STRUCT(UNTAG_CLOSURE((StgClosure*)w)->header.info)->type == WEAK
               || UNTAG_CLOSURE((StgClosure*)w)->header.info == &stg_DEAD_WEAK_info);
        checkClosure((StgClosure*)w);
    }
    if (tl != NULL) {
        ASSERT(prev == tl);
    }
}
#endif

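/* Weak pointers allocated since the last GC live on per-capability lists
 * (cap->weak_ptr_list_hd/tl).  Link each capability's list onto generation
 * 0's weak_ptr_list so the GC processes them too, and empty the
 * capability-local lists.
 */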
void collectFreshWeakPtrs(void)
{
    nat i;
    generation *gen = &generations[0];
    // move recently allocated weak_ptr_list to the old list as well
    for (i = 0; i < n_capabilities; i++) {
        Capability *cap = capabilities[i];
        if (cap->weak_ptr_list_tl != NULL) {
            IF_DEBUG(sanity,
                     checkWeakPtrSanity(cap->weak_ptr_list_hd,
                                        cap->weak_ptr_list_tl));
            cap->weak_ptr_list_tl->link = gen->weak_ptr_list;
            gen->weak_ptr_list = cap->weak_ptr_list_hd;
            cap->weak_ptr_list_tl = NULL;
            cap->weak_ptr_list_hd = NULL;
        } else {
            ASSERT(cap->weak_ptr_list_hd == NULL);
        }
    }
}

/* -----------------------------------------------------------------------------
   Evacuate every weak pointer object on the weak_ptr_list, and update
   the link fields.
   -------------------------------------------------------------------------- */

void
markWeakPtrList ( void )
{
    nat g;

    for (g = 0; g <= N; g++) {
        generation *gen = &generations[g];
        StgWeak *w, **last_w;

        last_w = &gen->weak_ptr_list;
        for (w = gen->weak_ptr_list; w != NULL; w = w->link) {
            // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here

#ifdef DEBUG
            {   // careful to read the info pointer only once here, because
                // during parallel GC it might change under our feet.
                const StgInfoTable *info;
                info = w->header.info;
                ASSERT(IS_FORWARDING_PTR(info)
                       || info == &stg_DEAD_WEAK_info
                       || INFO_PTR_TO_STRUCT(info)->type == WEAK);
            }
#endif

            evacuate((StgClosure **)last_w);
            w = *last_w;
            last_w = &(w->link);
        }
    }
}
424