Don't look at all the threads before each GC.
[ghc.git] / rts / sm / MarkWeak.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Weak pointers and weak-like things in the GC
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "Rts.h"
15 #include "Storage.h"
16 #include "MarkWeak.h"
17 #include "GC.h"
18 #include "GCThread.h"
19 #include "Evac.h"
20 #include "Trace.h"
21 #include "Schedule.h"
22
23 /* -----------------------------------------------------------------------------
24 Weak Pointers
25
26 traverse_weak_ptr_list is called possibly many times during garbage
27 collection. It returns a flag indicating whether it did any work
28 (i.e. called evacuate on any live pointers).
29
30 Invariant: traverse_weak_ptr_list is called when the heap is in an
31 idempotent state. That means that there are no pending
32 evacuate/scavenge operations. This invariant helps the weak
33 pointer code decide which weak pointers are dead - if there are no
34 new live weak pointers, then all the currently unreachable ones are
35 dead.
36
37 For generational GC: we just don't try to finalize weak pointers in
38 older generations than the one we're collecting. This could
39 probably be optimised by keeping per-generation lists of weak
40 pointers, but for a few weak pointers this scheme will work.
41
42 There are three distinct stages to processing weak pointers:
43
44 - weak_stage == WeakPtrs
45
46 We process all the weak pointers whose keys are alive (evacuate
47 their values and finalizers), and repeat until we can find no new
48 live keys. If no live keys are found in this pass, then we
49 evacuate the finalizers of all the dead weak pointers in order to
50 run them.
51
52 - weak_stage == WeakThreads
53
54 Now, we discover which *threads* are still alive. Pointers to
55 threads from the all_threads and main thread lists are the
56 weakest of all: a pointer from the finalizer of a dead weak
57 pointer can keep a thread alive. Any threads found to be unreachable
58 are evacuated and placed on the resurrected_threads list so we
59 can send them a signal later.
60
61 - weak_stage == WeakDone
62
63 No more evacuation is done.
64
65 -------------------------------------------------------------------------- */
66
67 /* Which stage of processing various kinds of weak pointer are we at?
68 * (see traverse_weak_ptr_list() below for discussion).
69 */
70 typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
71 static WeakStage weak_stage;
72
73 /* Weak pointers
74 */
75 StgWeak *old_weak_ptr_list; // also pending finaliser list
76
77 // List of threads found to be unreachable
78 StgTSO *resurrected_threads;
79
80 // List of blocked threads found to have pending throwTos
81 StgTSO *exception_threads;
82
83 void
84 initWeakForGC(void)
85 {
86 old_weak_ptr_list = weak_ptr_list;
87 weak_ptr_list = NULL;
88 weak_stage = WeakPtrs;
89 resurrected_threads = END_TSO_QUEUE;
90 exception_threads = END_TSO_QUEUE;
91 }
92
93 rtsBool
94 traverseWeakPtrList(void)
95 {
96 StgWeak *w, **last_w, *next_w;
97 StgClosure *new;
98 rtsBool flag = rtsFalse;
99
100 switch (weak_stage) {
101
102 case WeakDone:
103 return rtsFalse;
104
105 case WeakPtrs:
106 /* doesn't matter where we evacuate values/finalizers to, since
107 * these pointers are treated as roots (iff the keys are alive).
108 */
109 gct->evac_step = 0;
110
111 last_w = &old_weak_ptr_list;
112 for (w = old_weak_ptr_list; w != NULL; w = next_w) {
113
114 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
115 * called on a live weak pointer object. Just remove it.
116 */
117 if (w->header.info == &stg_DEAD_WEAK_info) {
118 next_w = ((StgDeadWeak *)w)->link;
119 *last_w = next_w;
120 continue;
121 }
122
123 switch (get_itbl(w)->type) {
124
125 case EVACUATED:
126 next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
127 *last_w = next_w;
128 continue;
129
130 case WEAK:
131 /* Now, check whether the key is reachable.
132 */
133 new = isAlive(w->key);
134 if (new != NULL) {
135 w->key = new;
136 // evacuate the value and finalizer
137 evacuate(&w->value);
138 evacuate(&w->finalizer);
139 // remove this weak ptr from the old_weak_ptr list
140 *last_w = w->link;
141 // and put it on the new weak ptr list
142 next_w = w->link;
143 w->link = weak_ptr_list;
144 weak_ptr_list = w;
145 flag = rtsTrue;
146
147 debugTrace(DEBUG_weak,
148 "weak pointer still alive at %p -> %p",
149 w, w->key);
150 continue;
151 }
152 else {
153 last_w = &(w->link);
154 next_w = w->link;
155 continue;
156 }
157
158 default:
159 barf("traverseWeakPtrList: not WEAK");
160 }
161 }
162
163 /* If we didn't make any changes, then we can go round and kill all
164 * the dead weak pointers. The old_weak_ptr list is used as a list
165 * of pending finalizers later on.
166 */
167 if (flag == rtsFalse) {
168 for (w = old_weak_ptr_list; w; w = w->link) {
169 evacuate(&w->finalizer);
170 }
171
172 // Next, move to the WeakThreads stage after fully
173 // scavenging the finalizers we've just evacuated.
174 weak_stage = WeakThreads;
175 }
176
177 return rtsTrue;
178
179 case WeakThreads:
180 /* Now deal with the all_threads list, which behaves somewhat like
181 * the weak ptr list. If we discover any threads that are about to
182 * become garbage, we wake them up and administer an exception.
183 */
184 {
185 StgTSO *t, *tmp, *next, **prev;
186 nat g, s;
187 step *stp;
188
189 // Traverse thread lists for generations we collected...
190 for (g = 0; g <= N; g++) {
191 for (s = 0; s < generations[g].n_steps; s++) {
192 stp = &generations[g].steps[s];
193
194 prev = &stp->old_threads;
195
196 for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
197
198 tmp = (StgTSO *)isAlive((StgClosure *)t);
199
200 if (tmp != NULL) {
201 t = tmp;
202 }
203
204 ASSERT(get_itbl(t)->type == TSO);
205 switch (t->what_next) {
206 case ThreadRelocated:
207 next = t->_link;
208 *prev = next;
209 continue;
210 case ThreadKilled:
211 case ThreadComplete:
212 // finshed or died. The thread might still
213 // be alive, but we don't keep it on the
214 // all_threads list. Don't forget to
215 // stub out its global_link field.
216 next = t->global_link;
217 t->global_link = END_TSO_QUEUE;
218 *prev = next;
219 continue;
220 default:
221 ;
222 }
223
224 if (tmp == NULL) {
225 // not alive (yet): leave this thread on the
226 // old_all_threads list.
227 prev = &(t->global_link);
228 next = t->global_link;
229 }
230 else {
231 // alive
232 next = t->global_link;
233 *prev = next;
234
235 // This is a good place to check for blocked
236 // exceptions. It might be the case that a thread is
237 // blocked on delivering an exception to a thread that
238 // is also blocked - we try to ensure that this
239 // doesn't happen in throwTo(), but it's too hard (or
240 // impossible) to close all the race holes, so we
241 // accept that some might get through and deal with
242 // them here. A GC will always happen at some point,
243 // even if the system is otherwise deadlocked.
244 if (t->blocked_exceptions != END_TSO_QUEUE) {
245 t->global_link = exception_threads;
246 exception_threads = t;
247 } else {
248 // move this thread onto the correct threads list.
249 step *new_step;
250 new_step = Bdescr((P_)t)->step;
251 t->global_link = new_step->threads;
252 new_step->threads = t;
253 }
254 }
255 }
256 }
257 }
258 }
259
260 /* If we evacuated any threads, we need to go back to the scavenger.
261 */
262 if (flag) return rtsTrue;
263
264 /* And resurrect any threads which were about to become garbage.
265 */
266 {
267 nat g, s;
268 step *stp;
269 StgTSO *t, *tmp, *next;
270
271 for (g = 0; g <= N; g++) {
272 for (s = 0; s < generations[g].n_steps; s++) {
273 stp = &generations[g].steps[s];
274
275 for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
276 next = t->global_link;
277 tmp = t;
278 evacuate((StgClosure **)&tmp);
279 tmp->global_link = resurrected_threads;
280 resurrected_threads = tmp;
281 }
282 }
283 }
284 }
285
286 /* Finally, we can update the blackhole_queue. This queue
287 * simply strings together TSOs blocked on black holes, it is
288 * not intended to keep anything alive. Hence, we do not follow
289 * pointers on the blackhole_queue until now, when we have
290 * determined which TSOs are otherwise reachable. We know at
291 * this point that all TSOs have been evacuated, however.
292 */
293 {
294 StgTSO **pt;
295 for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->_link)) {
296 *pt = (StgTSO *)isAlive((StgClosure *)*pt);
297 ASSERT(*pt != NULL);
298 }
299 }
300
301 weak_stage = WeakDone; // *now* we're done,
302 return rtsTrue; // but one more round of scavenging, please
303
304 default:
305 barf("traverse_weak_ptr_list");
306 return rtsTrue;
307 }
308
309 }
310
311 /* -----------------------------------------------------------------------------
312 The blackhole queue
313
314 Threads on this list behave like weak pointers during the normal
315 phase of garbage collection: if the blackhole is reachable, then
316 the thread is reachable too.
317 -------------------------------------------------------------------------- */
318 rtsBool
319 traverseBlackholeQueue (void)
320 {
321 StgTSO *prev, *t, *tmp;
322 rtsBool flag;
323 nat type;
324
325 flag = rtsFalse;
326 prev = NULL;
327
328 for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->_link) {
329 // if the thread is not yet alive...
330 if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
331 // if the closure it is blocked on is either (a) a
332 // reachable BLAKCHOLE or (b) not a BLACKHOLE, then we
333 // make the thread alive.
334 if (!isAlive(t->block_info.closure)) {
335 type = get_itbl(t->block_info.closure)->type;
336 if (type == BLACKHOLE || type == CAF_BLACKHOLE) {
337 continue;
338 }
339 }
340 evacuate((StgClosure **)&t);
341 if (prev) prev->_link = t;
342 // no write barrier when on the blackhole queue,
343 // because we traverse the whole queue on every GC.
344 flag = rtsTrue;
345 }
346 }
347 return flag;
348 }
349
350 /* -----------------------------------------------------------------------------
351 After GC, the live weak pointer list may have forwarding pointers
352 on it, because a weak pointer object was evacuated after being
353 moved to the live weak pointer list. We remove those forwarding
354 pointers here.
355
356 Also, we don't consider weak pointer objects to be reachable, but
357 we must nevertheless consider them to be "live" and retain them.
358 Therefore any weak pointer objects which haven't as yet been
359 evacuated need to be evacuated now.
360 -------------------------------------------------------------------------- */
361
362 void
363 markWeakPtrList ( void )
364 {
365 StgWeak *w, **last_w, *tmp;
366
367 last_w = &weak_ptr_list;
368 for (w = weak_ptr_list; w; w = w->link) {
369 // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
370 ASSERT(w->header.info == &stg_DEAD_WEAK_info
371 || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
372 tmp = w;
373 evacuate((StgClosure **)&tmp);
374 *last_w = w;
375 last_w = &(w->link);
376 }
377 }
378