Don't traverse the entire list of threads on every GC (phase 1)
[ghc.git] / rts / sm / MarkWeak.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Weak pointers and weak-like things in the GC
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "Rts.h"
15 #include "Storage.h"
16 #include "MarkWeak.h"
17 #include "GC.h"
18 #include "GCThread.h"
19 #include "Evac.h"
20 #include "Trace.h"
21 #include "Schedule.h"
22
23 /* -----------------------------------------------------------------------------
24 Weak Pointers
25
26 traverse_weak_ptr_list is called possibly many times during garbage
27 collection. It returns a flag indicating whether it did any work
28 (i.e. called evacuate on any live pointers).
29
30 Invariant: traverse_weak_ptr_list is called when the heap is in an
31 idempotent state. That means that there are no pending
32 evacuate/scavenge operations. This invariant helps the weak
33 pointer code decide which weak pointers are dead - if there are no
34 new live weak pointers, then all the currently unreachable ones are
35 dead.
36
37 For generational GC: we just don't try to finalize weak pointers in
38 older generations than the one we're collecting. This could
39 probably be optimised by keeping per-generation lists of weak
40 pointers, but for a few weak pointers this scheme will work.
41
42 There are three distinct stages to processing weak pointers:
43
44 - weak_stage == WeakPtrs
45
46    We process all the weak pointers whose keys are alive (evacuate
47 their values and finalizers), and repeat until we can find no new
48 live keys. If no live keys are found in this pass, then we
49 evacuate the finalizers of all the dead weak pointers in order to
50 run them.
51
52 - weak_stage == WeakThreads
53
54 Now, we discover which *threads* are still alive. Pointers to
55 threads from the all_threads and main thread lists are the
56    weakest of all: a pointer from the finalizer of a dead weak
57 pointer can keep a thread alive. Any threads found to be unreachable
58 are evacuated and placed on the resurrected_threads list so we
59 can send them a signal later.
60
61 - weak_stage == WeakDone
62
63 No more evacuation is done.
64
65 -------------------------------------------------------------------------- */
66
/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverseWeakPtrList() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
static WeakStage weak_stage;

/* Weak pointers captured at the start of this GC (see initWeakForGC()).
 * Once the keys are known to be dead this list doubles as the list of
 * pending finalisers.
 */
StgWeak *old_weak_ptr_list;	// also pending finaliser list

/* Threads discovered to be unreachable during this GC; they are kept
 * here so a signal can be sent to them after GC (see the WeakThreads
 * discussion in the commentary above).
 */
StgTSO *resurrected_threads;
81 void
82 initWeakForGC(void)
83 {
84 old_weak_ptr_list = weak_ptr_list;
85 weak_ptr_list = NULL;
86 weak_stage = WeakPtrs;
87 resurrected_threads = END_TSO_QUEUE;
88 }
89
/* Advance weak-pointer processing by one round for the current stage.
 *
 * Returns rtsTrue if any evacuation was performed (the caller must do
 * another round of scavenging before calling again), and rtsFalse once
 * all stages are complete (weak_stage == WeakDone).
 */
rtsBool
traverseWeakPtrList(void)
{
    StgWeak *w, **last_w, *next_w;
    StgClosure *new;
    rtsBool flag = rtsFalse;

    switch (weak_stage) {

    case WeakDone:
        return rtsFalse;

    case WeakPtrs:
        /* doesn't matter where we evacuate values/finalizers to, since
         * these pointers are treated as roots (iff the keys are alive).
         */
        gct->evac_step = 0;

        last_w = &old_weak_ptr_list;
        for (w = old_weak_ptr_list; w != NULL; w = next_w) {

            /* There might be a DEAD_WEAK on the list if finalizeWeak# was
             * called on a live weak pointer object.  Just remove it.
             */
            if (w->header.info == &stg_DEAD_WEAK_info) {
                next_w = ((StgDeadWeak *)w)->link;
                *last_w = next_w;
                continue;
            }

            switch (get_itbl(w)->type) {

            case EVACUATED:
                // the weak object was evacuated by someone else: follow
                // the forwarding pointer and unlink this stale entry.
                next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
                *last_w = next_w;
                continue;

            case WEAK:
                /* Now, check whether the key is reachable.
                 */
                new = isAlive(w->key);
                if (new != NULL) {
                    w->key = new;
                    // evacuate the value and finalizer
                    evacuate(&w->value);
                    evacuate(&w->finalizer);
                    // remove this weak ptr from the old_weak_ptr list
                    *last_w = w->link;
                    // and put it on the new weak ptr list
                    next_w = w->link;
                    w->link = weak_ptr_list;
                    weak_ptr_list = w;
                    // we did some work, so another round is needed
                    flag = rtsTrue;

                    debugTrace(DEBUG_weak,
                               "weak pointer still alive at %p -> %p",
                               w, w->key);
                    continue;
                }
                else {
                    // key is (so far) dead: keep the weak ptr on
                    // old_weak_ptr_list; a later round may revive it.
                    last_w = &(w->link);
                    next_w = w->link;
                    continue;
                }

            default:
                barf("traverseWeakPtrList: not WEAK");
            }
        }

        /* If we didn't make any changes, then we can go round and kill all
         * the dead weak pointers.  The old_weak_ptr list is used as a list
         * of pending finalizers later on.
         */
        if (flag == rtsFalse) {
            for (w = old_weak_ptr_list; w; w = w->link) {
                evacuate(&w->finalizer);
            }

            // Next, move to the WeakThreads stage after fully
            // scavenging the finalizers we've just evacuated.
            weak_stage = WeakThreads;
        }

        return rtsTrue;

    case WeakThreads:
        /* Now deal with the all_threads list, which behaves somewhat like
         * the weak ptr list.  If we discover any threads that are about to
         * become garbage, we wake them up and administer an exception.
         */
        {
            StgTSO *t, *tmp, *next, **prev;
            nat g, s;
            step *stp;

            // Traverse thread lists for generations we collected...
            for (g = 0; g <= N; g++) {
                for (s = 0; s < generations[g].n_steps; s++) {
                    stp = &generations[g].steps[s];

                    prev = &stp->old_threads;

                    for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {

                        // is this thread reachable from elsewhere?
                        tmp = (StgTSO *)isAlive((StgClosure *)t);

                        if (tmp != NULL) {
                            t = tmp;
                        }

                        ASSERT(get_itbl(t)->type == TSO);
                        switch (t->what_next) {
                        case ThreadRelocated:
                            // TSO has moved: drop this stale entry.
                            next = t->_link;
                            *prev = next;
                            continue;
                        case ThreadKilled:
                        case ThreadComplete:
                            // finished or died.  The thread might still
                            // be alive, but we don't keep it on the
                            // all_threads list.  Don't forget to
                            // stub out its global_link field.
                            next = t->global_link;
                            t->global_link = END_TSO_QUEUE;
                            *prev = next;
                            continue;
                        default:
                            ;
                        }

                        if (tmp == NULL) {
                            // not alive (yet): leave this thread on the
                            // old_all_threads list.
                            prev = &(t->global_link);
                            next = t->global_link;
                        }
                        else {
                            step *new_step;
                            // alive: move this thread onto the correct
                            // threads list.
                            next = t->global_link;
                            new_step = Bdescr((P_)t)->step;
                            t->global_link = new_step->threads;
                            new_step->threads = t;
                            *prev = next;
                        }
                    }
                }
            }
        }

        /* If we evacuated any threads, we need to go back to the scavenger.
         * NOTE(review): flag is never set in this stage, so this branch
         * appears to be always false here — confirm against history.
         */
        if (flag) return rtsTrue;

        /* And resurrect any threads which were about to become garbage.
         */
        {
            nat g, s;
            step *stp;
            StgTSO *t, *tmp, *next;

            for (g = 0; g <= N; g++) {
                for (s = 0; s < generations[g].n_steps; s++) {
                    stp = &generations[g].steps[s];

                    for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
                        next = t->global_link;
                        // evacuate the dead thread and chain it onto the
                        // resurrected_threads list for signalling later.
                        tmp = t;
                        evacuate((StgClosure **)&tmp);
                        tmp->global_link = resurrected_threads;
                        resurrected_threads = tmp;
                    }
                }
            }
        }

        /* Finally, we can update the blackhole_queue.  This queue
         * simply strings together TSOs blocked on black holes, it is
         * not intended to keep anything alive.  Hence, we do not follow
         * pointers on the blackhole_queue until now, when we have
         * determined which TSOs are otherwise reachable.  We know at
         * this point that all TSOs have been evacuated, however.
         */
        {
            StgTSO **pt;
            for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->_link)) {
                *pt = (StgTSO *)isAlive((StgClosure *)*pt);
                ASSERT(*pt != NULL);
            }
        }

        weak_stage = WeakDone;  // *now* we're done,
        return rtsTrue;         // but one more round of scavenging, please

    default:
        barf("traverse_weak_ptr_list");
        return rtsTrue;
    }

}
292
293 /* -----------------------------------------------------------------------------
294 The blackhole queue
295
296 Threads on this list behave like weak pointers during the normal
297 phase of garbage collection: if the blackhole is reachable, then
298 the thread is reachable too.
299 -------------------------------------------------------------------------- */
/* Walk the blackhole_queue once, making alive any blocked thread whose
 * blocking closure is reachable or is no longer a black hole.
 *
 * Returns rtsTrue if anything was evacuated (caller must scavenge and
 * call again); rtsFalse when a fixed point has been reached.
 */
rtsBool
traverseBlackholeQueue (void)
{
    StgTSO *prev, *t, *tmp;
    rtsBool flag;
    nat type;

    flag = rtsFalse;
    prev = NULL;

    for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->_link) {
        // if the thread is not yet alive...
        if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
            // if the closure it is blocked on is either (a) a
            // reachable BLACKHOLE or (b) not a BLACKHOLE, then we
            // make the thread alive.
            if (!isAlive(t->block_info.closure)) {
                type = get_itbl(t->block_info.closure)->type;
                if (type == BLACKHOLE || type == CAF_BLACKHOLE) {
                    // blocked on an unreachable black hole: skip for
                    // now; a later round may still make it reachable.
                    continue;
                }
            }
            evacuate((StgClosure **)&t);
            if (prev) prev->_link = t;
            // no write barrier when on the blackhole queue,
            // because we traverse the whole queue on every GC.
            flag = rtsTrue;
        }
    }
    return flag;
}
331
332 /* -----------------------------------------------------------------------------
333 After GC, the live weak pointer list may have forwarding pointers
334 on it, because a weak pointer object was evacuated after being
335 moved to the live weak pointer list. We remove those forwarding
336 pointers here.
337
338 Also, we don't consider weak pointer objects to be reachable, but
339 we must nevertheless consider them to be "live" and retain them.
340 Therefore any weak pointer objects which haven't as yet been
341 evacuated need to be evacuated now.
342 -------------------------------------------------------------------------- */
343
/* Evacuate every object on weak_ptr_list, so the weak pointer objects
 * themselves are retained even though they are not considered
 * reachable (see the commentary above this function).
 */
void
markWeakPtrList ( void )
{
    StgWeak *w, **last_w, *tmp;

    last_w = &weak_ptr_list;
    for (w = weak_ptr_list; w; w = w->link) {
        // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
        ASSERT(w->header.info == &stg_DEAD_WEAK_info
               || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
        tmp = w;
        evacuate((StgClosure **)&tmp);
        // NOTE(review): the list is relinked through the pre-evacuation
        // pointer `w`, not `tmp`; forwarding pointers left on the list
        // appear to be short-circuited by a later traversal (see the
        // EVACUATED case in traverseWeakPtrList) — confirm.
        *last_w = w;
        last_w = &(w->link);
    }
}
360