d57f7a094b2cb0ca6366cb804242945acc3eace4
[ghc.git] / rts / sm / MarkWeak.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Weak pointers and weak-like things in the GC
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "MarkWeak.h"
18 #include "GC.h"
19 #include "GCThread.h"
20 #include "GCTDecl.h"
21 #include "Evac.h"
22 #include "Trace.h"
23 #include "Schedule.h"
24 #include "Weak.h"
25 #include "Storage.h"
26 #include "Threads.h"
27
28 /* -----------------------------------------------------------------------------
29 Weak Pointers
30
31 traverse_weak_ptr_list is called possibly many times during garbage
32 collection. It returns a flag indicating whether it did any work
33 (i.e. called evacuate on any live pointers).
34
35 Invariant: traverse_weak_ptr_list is called when the heap is in an
36 idempotent state. That means that there are no pending
37 evacuate/scavenge operations. This invariant helps the weak
38 pointer code decide which weak pointers are dead - if there are no
39 new live weak pointers, then all the currently unreachable ones are
40 dead.
41
42 For generational GC: we just don't try to finalize weak pointers in
43 older generations than the one we're collecting. This could
44 probably be optimised by keeping per-generation lists of weak
45 pointers, but for a few weak pointers this scheme will work.
46
47 There are three distinct stages to processing weak pointers:
48
49 - weak_stage == WeakPtrs
50
   We process all the weak pointers whose keys are alive (evacuate
52 their values and finalizers), and repeat until we can find no new
53 live keys. If no live keys are found in this pass, then we
54 evacuate the finalizers of all the dead weak pointers in order to
55 run them.
56
57 - weak_stage == WeakThreads
58
59 Now, we discover which *threads* are still alive. Pointers to
60 threads from the all_threads and main thread lists are the
   weakest of all: a pointer from the finalizer of a dead weak
62 pointer can keep a thread alive. Any threads found to be unreachable
63 are evacuated and placed on the resurrected_threads list so we
64 can send them a signal later.
65
66 - weak_stage == WeakDone
67
68 No more evacuation is done.
69
70 -------------------------------------------------------------------------- */
71
/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverseWeakPtrList() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
static WeakStage weak_stage;

/* Weak pointers.
 *
 * At the start of a GC, initWeakForGC() moves the global weak_ptr_list
 * here; weak pointers found to be live are moved back onto
 * weak_ptr_list (in their original order — see #7160), using
 * weak_ptr_list_tail to append in O(1).  Whatever remains on
 * old_weak_ptr_list when no more live keys can be found is treated as
 * the list of pending finalisers.
 */
StgWeak *old_weak_ptr_list; // also pending finaliser list
StgWeak *weak_ptr_list_tail; // tail of the rebuilt weak_ptr_list

// List of threads found to be unreachable (i.e. about to become
// garbage); they are resurrected so they can be sent a signal later.
StgTSO *resurrected_threads;

static void resurrectUnreachableThreads (generation *gen);
static rtsBool tidyThreadList (generation *gen);
88
89 void
90 initWeakForGC(void)
91 {
92 old_weak_ptr_list = weak_ptr_list;
93 weak_ptr_list = NULL;
94 weak_ptr_list_tail = NULL;
95 weak_stage = WeakPtrs;
96 resurrected_threads = END_TSO_QUEUE;
97 }
98
/* Process the weak-pointer and thread lists for this GC cycle, one
 * stage at a time (weak_stage: WeakPtrs -> WeakThreads -> WeakDone).
 *
 * Returns rtsTrue iff more scavenging is required before calling this
 * function again (i.e. it evacuated something, or moved to a new
 * stage); returns rtsFalse only once weak_stage == WeakDone.
 *
 * Invariant (see the file header comment): the heap must be in an
 * idempotent state when this is called — no pending evacuate/scavenge
 * work — so that "key not yet evacuated" reliably means "key dead so
 * far".
 */
rtsBool
traverseWeakPtrList(void)
{
    StgWeak *w, **last_w, *next_w;
    StgClosure *new;
    rtsBool flag = rtsFalse;
    const StgInfoTable *info;

    switch (weak_stage) {

    case WeakDone:
        return rtsFalse;

    case WeakPtrs:
        /* doesn't matter where we evacuate values/finalizers to, since
         * these pointers are treated as roots (iff the keys are alive).
         */
        gct->evac_gen_no = 0;

        // Walk old_weak_ptr_list, moving weak pointers with live keys
        // back to weak_ptr_list.  last_w tracks the link field that
        // points at the current node, so nodes can be unlinked in place.
        last_w = &old_weak_ptr_list;
        for (w = old_weak_ptr_list; w != NULL; w = next_w) {

            /* There might be a DEAD_WEAK on the list if finalizeWeak# was
             * called on a live weak pointer object.  Just remove it.
             */
            if (w->header.info == &stg_DEAD_WEAK_info) {
                next_w = ((StgDeadWeak *)w)->link;
                *last_w = next_w;
                continue;
            }

            info = get_itbl((StgClosure *)w);
            switch (info->type) {

            case WEAK:
                /* Now, check whether the key is reachable.  isAlive()
                 * returns the key's new address if it has been
                 * evacuated, NULL otherwise.
                 */
                new = isAlive(w->key);
                if (new != NULL) {
                    w->key = new;
                    // evacuate the value and finalizer
                    evacuate(&w->value);
                    evacuate(&w->finalizer);
                    // remove this weak ptr from the old_weak_ptr list
                    *last_w = w->link;
                    next_w = w->link;

                    // and put it on the new weak ptr list.
                    // NB. we must retain the order of the weak_ptr_list (#7160)
                    if (weak_ptr_list == NULL) {
                        weak_ptr_list = w;
                    } else {
                        weak_ptr_list_tail->link = w;
                    }
                    weak_ptr_list_tail = w;
                    w->link = NULL;
                    flag = rtsTrue;   // we did work: another scavenge pass needed

                    debugTrace(DEBUG_weak,
                               "weak pointer still alive at %p -> %p",
                               w, w->key);
                    continue;
                }
                else {
                    // key not (yet) alive: leave this weak ptr where it is
                    last_w = &(w->link);
                    next_w = w->link;
                    continue;
                }

            default:
                barf("traverseWeakPtrList: not WEAK");
            }
        }

        /* If we didn't make any changes, then we can go round and kill all
         * the dead weak pointers.  The old_weak_ptr list is used as a list
         * of pending finalizers later on.
         */
        if (flag == rtsFalse) {
            for (w = old_weak_ptr_list; w; w = w->link) {
                evacuate(&w->finalizer);
            }

            // Next, move to the WeakThreads stage after fully
            // scavenging the finalizers we've just evacuated.
            weak_stage = WeakThreads;
        }

        return rtsTrue;

    case WeakThreads:
        /* Now deal with the gen->threads lists, which behave somewhat like
         * the weak ptr list.  If we discover any threads that are about to
         * become garbage, we wake them up and administer an exception.
         */
        {
            nat g;

            // Traverse thread lists for generations we collected...
            // ToDo when we have one gen per capability:
            // for (n = 0; n < n_capabilities; n++) {
            //     if (tidyThreadList(&nurseries[n])) {
            //         flag = rtsTrue;
            //     }
            // }
            for (g = 0; g <= N; g++) {
                if (tidyThreadList(&generations[g])) {
                    flag = rtsTrue;
                }
            }

            /* If we evacuated any threads, we need to go back to the scavenger.
             */
            if (flag) return rtsTrue;

            /* And resurrect any threads which were about to become garbage.
             */
            {
                nat g;
                for (g = 0; g <= N; g++) {
                    resurrectUnreachableThreads(&generations[g]);
                }
            }

            weak_stage = WeakDone;  // *now* we're done,
            return rtsTrue;         // but one more round of scavenging, please
        }

    default:
        barf("traverse_weak_ptr_list");
        return rtsTrue;
    }
}
232
233 static void resurrectUnreachableThreads (generation *gen)
234 {
235 StgTSO *t, *tmp, *next;
236
237 for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
238 next = t->global_link;
239
240 // ThreadFinished and ThreadComplete: we have to keep
241 // these on the all_threads list until they
242 // become garbage, because they might get
243 // pending exceptions.
244 switch (t->what_next) {
245 case ThreadKilled:
246 case ThreadComplete:
247 continue;
248 default:
249 tmp = t;
250 evacuate((StgClosure **)&tmp);
251 tmp->global_link = resurrected_threads;
252 resurrected_threads = tmp;
253 }
254 }
255 }
256
/* Walk gen->old_threads, partitioning the TSOs:
 *
 *   - a TSO that isAlive() (i.e. already evacuated because something
 *     else retains it) is unlinked from gen->old_threads and moved
 *     onto the threads list of the generation it now resides in;
 *   - anything else is left on gen->old_threads for a later pass
 *     (resurrectUnreachableThreads()).
 *
 * NOTE(review): `flag` is never set to rtsTrue anywhere in this
 * function, so it always returns rtsFalse and the caller's
 * "did any work" check on the result is effectively dead — presumably
 * a leftover from refactoring; confirm before relying on the return
 * value.
 */
static rtsBool tidyThreadList (generation *gen)
{
    StgTSO *t, *tmp, *next, **prev;
    rtsBool flag = rtsFalse;

    // prev tracks the link field pointing at the current node, so a
    // live node can be unlinked in place.
    prev = &gen->old_threads;

    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {

        // isAlive returns the TSO's new (to-space) address if it has
        // been evacuated, NULL otherwise.
        tmp = (StgTSO *)isAlive((StgClosure *)t);

        if (tmp != NULL) {
            t = tmp;
        }

        ASSERT(get_itbl((StgClosure *)t)->type == TSO);
        next = t->global_link;

        // if the thread is not masking exceptions but there are
        // pending exceptions on its queue, then something has gone
        // wrong.  However, pending exceptions are OK if there is an
        // FFI call.
        ASSERT(t->blocked_exceptions == END_BLOCKED_EXCEPTIONS_QUEUE
               || t->why_blocked == BlockedOnCCall
               || t->why_blocked == BlockedOnCCall_Interruptible
               || (t->flags & TSO_BLOCKEX));

        if (tmp == NULL) {
            // not alive (yet): leave this thread on the
            // old_all_threads list.
            prev = &(t->global_link);
        }
        else {
            // alive
            *prev = next;

            // move this thread onto the correct threads list.
            generation *new_gen;
            new_gen = Bdescr((P_)t)->gen;
            t->global_link = new_gen->threads;
            new_gen->threads = t;
        }
    }

    return flag;
}
303
/* -----------------------------------------------------------------------------
   Evacuate every weak pointer object on the weak_ptr_list, and update
   the link fields.

   ToDo: with a lot of weak pointers, this will be expensive.  We
   should have a per-GC weak pointer list, just like threads.
   -------------------------------------------------------------------------- */

void
markWeakPtrList ( void )
{
    StgWeak *w, **last_w;

    // Evacuate each node through the link field that points at it
    // (last_w), so the list is rewired to the new addresses as we go.
    last_w = &weak_ptr_list;
    for (w = weak_ptr_list; w; w = w->link) {
        // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here

#ifdef DEBUG
        {   // careful to do this assertion only reading the info ptr
            // once, because during parallel GC it might change under our feet.
            const StgInfoTable *info;
            info = w->header.info;
            ASSERT(IS_FORWARDING_PTR(info)
                   || info == &stg_DEAD_WEAK_info
                   || INFO_PTR_TO_STRUCT(info)->type == WEAK);
        }
#endif

        evacuate((StgClosure **)last_w);
        // reload w: evacuate() may have updated *last_w to the new copy
        w = *last_w;
        // DEAD_WEAK has a different layout, so its link field lives at
        // a different offset (StgDeadWeak vs StgWeak).
        if (w->header.info == &stg_DEAD_WEAK_info) {
            last_w = &(((StgDeadWeak*)w)->link);
        } else {
            last_w = &(w->link);
        }
    }
}
341