remove EVACUATED: store the forwarding pointer in the info pointer
[ghc.git] / rts / sm / MarkWeak.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Weak pointers and weak-like things in the GC
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "Rts.h"
15 #include "Storage.h"
16 #include "MarkWeak.h"
17 #include "GC.h"
18 #include "GCThread.h"
19 #include "Evac.h"
20 #include "Trace.h"
21 #include "Schedule.h"
22
23 /* -----------------------------------------------------------------------------
24 Weak Pointers
25
26 traverse_weak_ptr_list is called possibly many times during garbage
27 collection. It returns a flag indicating whether it did any work
28 (i.e. called evacuate on any live pointers).
29
30 Invariant: traverse_weak_ptr_list is called when the heap is in an
31 idempotent state. That means that there are no pending
32 evacuate/scavenge operations. This invariant helps the weak
33 pointer code decide which weak pointers are dead - if there are no
34 new live weak pointers, then all the currently unreachable ones are
35 dead.
36
37 For generational GC: we just don't try to finalize weak pointers in
38 older generations than the one we're collecting. This could
39 probably be optimised by keeping per-generation lists of weak
40 pointers, but for a few weak pointers this scheme will work.
41
42 There are three distinct stages to processing weak pointers:
43
44 - weak_stage == WeakPtrs
45
46    We process all the weak pointers whose keys are alive (evacuate
47 their values and finalizers), and repeat until we can find no new
48 live keys. If no live keys are found in this pass, then we
49 evacuate the finalizers of all the dead weak pointers in order to
50 run them.
51
52 - weak_stage == WeakThreads
53
54 Now, we discover which *threads* are still alive. Pointers to
55 threads from the all_threads and main thread lists are the
56    weakest of all: a pointer from the finalizer of a dead weak
57 pointer can keep a thread alive. Any threads found to be unreachable
58 are evacuated and placed on the resurrected_threads list so we
59 can send them a signal later.
60
61 - weak_stage == WeakDone
62
63 No more evacuation is done.
64
65 -------------------------------------------------------------------------- */
66
/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverse_weak_ptr_list() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;

// Current stage; reset to WeakPtrs by initWeakForGC() at the start of
// each collection, and advanced by traverseWeakPtrList().
static WeakStage weak_stage;
72
/* Weak pointers.
 * Snapshot of weak_ptr_list taken by initWeakForGC(); weak pointers whose
 * keys are found alive migrate back to weak_ptr_list, and whatever remains
 * here when the WeakPtrs stage ends is treated as the pending-finaliser list.
 */
StgWeak *old_weak_ptr_list; // also pending finaliser list

// List of threads found to be unreachable (i.e. they're about to be
// garbage collected, so we resurrect them to send a signal later)
StgTSO *resurrected_threads;

// List of blocked threads found to have pending throwTos
StgTSO *exception_threads;
82
83 void
84 initWeakForGC(void)
85 {
86 old_weak_ptr_list = weak_ptr_list;
87 weak_ptr_list = NULL;
88 weak_stage = WeakPtrs;
89 resurrected_threads = END_TSO_QUEUE;
90 exception_threads = END_TSO_QUEUE;
91 }
92
/* Run one pass of the current weak-pointer stage (see the commentary at
 * the top of this file).  Returns rtsTrue if any evacuation was done, so
 * the caller must scavenge again before calling back; returns rtsFalse
 * once weak_stage == WeakDone and everything has settled.
 */
rtsBool
traverseWeakPtrList(void)
{
    StgWeak *w, **last_w, *next_w;
    StgClosure *new;
    rtsBool flag = rtsFalse;
    const StgInfoTable *info;

    switch (weak_stage) {

    case WeakDone:
        return rtsFalse;

    case WeakPtrs:
        /* doesn't matter where we evacuate values/finalizers to, since
         * these pointers are treated as roots (iff the keys are alive).
         */
        gct->evac_step = 0;

        last_w = &old_weak_ptr_list;
        for (w = old_weak_ptr_list; w != NULL; w = next_w) {

            /* There might be a DEAD_WEAK on the list if finalizeWeak# was
             * called on a live weak pointer object.  Just remove it.
             */
            if (w->header.info == &stg_DEAD_WEAK_info) {
                next_w = ((StgDeadWeak *)w)->link;
                *last_w = next_w;
                continue;
            }

            // The weak object itself may have been evacuated since we
            // built old_weak_ptr_list; in that case its header holds a
            // forwarding pointer, which we splice into the list here.
            info = w->header.info;
            if (IS_FORWARDING_PTR(info)) {
                next_w = (StgWeak *)UN_FORWARDING_PTR(info);
                *last_w = next_w;
                continue;
            }

            switch (INFO_PTR_TO_STRUCT(info)->type) {

            case WEAK:
                /* Now, check whether the key is reachable.
                 */
                new = isAlive(w->key);
                if (new != NULL) {
                    w->key = new;
                    // evacuate the value and finalizer
                    evacuate(&w->value);
                    evacuate(&w->finalizer);
                    // remove this weak ptr from the old_weak_ptr list
                    *last_w = w->link;
                    // and put it on the new weak ptr list
                    next_w = w->link;
                    w->link = weak_ptr_list;
                    weak_ptr_list = w;
                    flag = rtsTrue;  // did some work: caller must scavenge again

                    debugTrace(DEBUG_weak,
                               "weak pointer still alive at %p -> %p",
                               w, w->key);
                    continue;
                }
                else {
                    // key not (yet) reachable: leave this weak pointer
                    // on old_weak_ptr_list for the next pass.
                    last_w = &(w->link);
                    next_w = w->link;
                    continue;
                }

            default:
                barf("traverseWeakPtrList: not WEAK");
            }
        }

        /* If we didn't make any changes, then we can go round and kill all
         * the dead weak pointers.  The old_weak_ptr list is used as a list
         * of pending finalizers later on.
         */
        if (flag == rtsFalse) {
            // Everything still on old_weak_ptr_list is dead: keep only
            // the finalizers alive, so they can be scheduled to run.
            for (w = old_weak_ptr_list; w; w = w->link) {
                evacuate(&w->finalizer);
            }

            // Next, move to the WeakThreads stage after fully
            // scavenging the finalizers we've just evacuated.
            weak_stage = WeakThreads;
        }

        return rtsTrue;

    case WeakThreads:
        /* Now deal with the all_threads list, which behaves somewhat like
         * the weak ptr list.  If we discover any threads that are about to
         * become garbage, we wake them up and administer an exception.
         */
        {
            StgTSO *t, *tmp, *next, **prev;
            nat g, s;
            step *stp;

            // Traverse thread lists for generations we collected...
            for (g = 0; g <= N; g++) {
                for (s = 0; s < generations[g].n_steps; s++) {
                    stp = &generations[g].steps[s];

                    prev = &stp->old_threads;

                    for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {

                        tmp = (StgTSO *)isAlive((StgClosure *)t);

                        if (tmp != NULL) {
                            // use the evacuated copy from here on
                            t = tmp;
                        }

                        ASSERT(get_itbl(t)->type == TSO);
                        switch (t->what_next) {
                        case ThreadRelocated:
                            // relocated TSO: unlink the stale entry
                            next = t->_link;
                            *prev = next;
                            continue;
                        case ThreadKilled:
                        case ThreadComplete:
                            // finished or died.  The thread might still
                            // be alive, but we don't keep it on the
                            // all_threads list.  Don't forget to
                            // stub out its global_link field.
                            next = t->global_link;
                            t->global_link = END_TSO_QUEUE;
                            *prev = next;
                            continue;
                        default:
                            ;
                        }

                        if (tmp == NULL) {
                            // not alive (yet): leave this thread on the
                            // old_all_threads list.
                            prev = &(t->global_link);
                            next = t->global_link;
                        }
                        else {
                            // alive
                            next = t->global_link;
                            *prev = next;

                            // This is a good place to check for blocked
                            // exceptions.  It might be the case that a thread is
                            // blocked on delivering an exception to a thread that
                            // is also blocked - we try to ensure that this
                            // doesn't happen in throwTo(), but it's too hard (or
                            // impossible) to close all the race holes, so we
                            // accept that some might get through and deal with
                            // them here.  A GC will always happen at some point,
                            // even if the system is otherwise deadlocked.
                            if (t->blocked_exceptions != END_TSO_QUEUE) {
                                t->global_link = exception_threads;
                                exception_threads = t;
                            } else {
                                // move this thread onto the correct threads list.
                                step *new_step;
                                new_step = Bdescr((P_)t)->step;
                                t->global_link = new_step->threads;
                                new_step->threads = t;
                            }
                        }
                    }
                }
            }
        }

        /* If we evacuated any threads, we need to go back to the scavenger.
         * NOTE(review): 'flag' is only ever set in the WeakPtrs case above,
         * and it starts rtsFalse on every call, so this test appears to be
         * always false here - confirm before relying on it.
         */
        if (flag) return rtsTrue;

        /* And resurrect any threads which were about to become garbage.
         */
        {
            nat g, s;
            step *stp;
            StgTSO *t, *tmp, *next;

            // Anything left on a step's old_threads list is unreachable:
            // evacuate it and queue it on resurrected_threads so it can be
            // sent a signal later.
            for (g = 0; g <= N; g++) {
                for (s = 0; s < generations[g].n_steps; s++) {
                    stp = &generations[g].steps[s];

                    for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
                        next = t->global_link;
                        tmp = t;
                        evacuate((StgClosure **)&tmp);
                        tmp->global_link = resurrected_threads;
                        resurrected_threads = tmp;
                    }
                }
            }
        }

        /* Finally, we can update the blackhole_queue.  This queue
         * simply strings together TSOs blocked on black holes, it is
         * not intended to keep anything alive.  Hence, we do not follow
         * pointers on the blackhole_queue until now, when we have
         * determined which TSOs are otherwise reachable.  We know at
         * this point that all TSOs have been evacuated, however.
         */
        {
            StgTSO **pt;
            for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->_link)) {
                *pt = (StgTSO *)isAlive((StgClosure *)*pt);
                ASSERT(*pt != NULL);
            }
        }

        weak_stage = WeakDone;  // *now* we're done,
        return rtsTrue;         // but one more round of scavenging, please

    default:
        barf("traverse_weak_ptr_list");
        return rtsTrue;
    }

}
313
314 /* -----------------------------------------------------------------------------
315 The blackhole queue
316
317 Threads on this list behave like weak pointers during the normal
318 phase of garbage collection: if the blackhole is reachable, then
319 the thread is reachable too.
320 -------------------------------------------------------------------------- */
/* Walk blackhole_queue once, evacuating any TSO whose blocking closure is
 * reachable (or is no longer a black hole).  Returns rtsTrue if any thread
 * was evacuated, so the caller knows another scavenge round is needed.
 */
rtsBool
traverseBlackholeQueue (void)
{
    StgTSO *prev, *t, *tmp;
    rtsBool flag;
    nat type;

    flag = rtsFalse;
    prev = NULL;

    for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->_link) {
        // if the thread is not yet alive...
        if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
            // if the closure it is blocked on is either (a) a
            // reachable BLACKHOLE or (b) not a BLACKHOLE, then we
            // make the thread alive.
            if (!isAlive(t->block_info.closure)) {
                type = get_itbl(t->block_info.closure)->type;
                if (type == BLACKHOLE || type == CAF_BLACKHOLE) {
                    // blocked on an unreachable black hole: leave the
                    // thread for dead (it may be resurrected later).
                    continue;
                }
            }
            // evacuate updates t in place, so re-link the predecessor
            evacuate((StgClosure **)&t);
            if (prev) prev->_link = t;
            // no write barrier when on the blackhole queue,
            // because we traverse the whole queue on every GC.
            flag = rtsTrue;
        }
    }
    return flag;
}
352
353 /* -----------------------------------------------------------------------------
354 After GC, the live weak pointer list may have forwarding pointers
355 on it, because a weak pointer object was evacuated after being
356 moved to the live weak pointer list. We remove those forwarding
357 pointers here.
358
359 Also, we don't consider weak pointer objects to be reachable, but
360 we must nevertheless consider them to be "live" and retain them.
361 Therefore any weak pointer objects which haven't as yet been
362 evacuated need to be evacuated now.
363 -------------------------------------------------------------------------- */
364
/* Evacuate every object on weak_ptr_list so the weak pointer objects
 * themselves are retained, and strip any forwarding pointers already on
 * the list (see the comment block above).
 */
void
markWeakPtrList ( void )
{
    StgWeak *w, **last_w, *tmp;

    last_w = &weak_ptr_list;
    for (w = weak_ptr_list; w; w = w->link) {
        // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
        ASSERT(IS_FORWARDING_PTR(w->header.info)
               || w->header.info == &stg_DEAD_WEAK_info
               || get_itbl(w)->type == WEAK);
        tmp = w;
        evacuate((StgClosure **)&tmp);
        // NOTE(review): the list keeps the *old* pointer 'w', not the
        // evacuated copy 'tmp'; presumably the forwarding pointer left in
        // w's header is resolved later by traverseWeakPtrList()'s
        // IS_FORWARDING_PTR branch - confirm.
        *last_w = w;
        last_w = &(w->link);
    }
}
382