/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Weak pointers and weak-like things in the GC
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "MarkWeak.h"
#include "GC.h"
#include "GCThread.h"
#include "GCTDecl.h"
#include "Evac.h"
#include "Trace.h"
#include "Schedule.h"
#include "Weak.h"
#include "Storage.h"
#include "Threads.h"

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.

   There are three distinct stages to processing weak pointers:

   - weak_stage == WeakPtrs

     We process all the weak pointers whose keys are alive (evacuate
     their values and finalizers), and repeat until we can find no new
     live keys.  If no live keys are found in this pass, then we
     evacuate the finalizers of all the dead weak pointers in order to
     run them.

   - weak_stage == WeakThreads

     Now, we discover which *threads* are still alive.  Pointers to
     threads from the all_threads and main thread lists are the
     weakest of all: a pointer from the finalizer of a dead weak
     pointer can keep a thread alive.  Any threads found to be unreachable
     are evacuated and placed on the resurrected_threads list so we
     can send them a signal later.

   - weak_stage == WeakDone

     No more evacuation is done.

   -------------------------------------------------------------------------- */
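
/* An illustrative sketch (not the actual GC.c code) of how the collector
 * drives the stages above: traverseWeakPtrList() is interleaved with
 * scavenging until it reports that no new work was generated, e.g.
 *
 *     for (;;) {
 *         scavenge_until_all_done();
 *         // rtsTrue means we evacuated something (live weak values,
 *         // finalizers, resurrected threads, ...) and must scavenge again
 *         if (!traverseWeakPtrList()) break;
 *     }
 *
 * Each rtsTrue result forces another round of scavenging before the next
 * call, which is what maintains the "idempotent heap" invariant above.
 */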

/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverse_weak_ptr_list() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
static WeakStage weak_stage;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list; // also pending finaliser list
StgWeak *weak_ptr_list_tail;

// List of threads found to be unreachable
StgTSO *resurrected_threads;

static void resurrectUnreachableThreads (generation *gen);
static rtsBool tidyThreadList (generation *gen);

void
initWeakForGC(void)
{
    old_weak_ptr_list = weak_ptr_list;
    weak_ptr_list = NULL;
    weak_ptr_list_tail = NULL;
    weak_stage = WeakPtrs;
    resurrected_threads = END_TSO_QUEUE;
}

rtsBool
traverseWeakPtrList(void)
{
    StgWeak *w, **last_w, *next_w;
    StgClosure *new;
    rtsBool flag = rtsFalse;
    const StgInfoTable *info;

    switch (weak_stage) {

    case WeakDone:
        return rtsFalse;

    case WeakPtrs:
        /* doesn't matter where we evacuate values/finalizers to, since
         * these pointers are treated as roots (iff the keys are alive).
         */
        gct->evac_gen_no = 0;

        last_w = &old_weak_ptr_list;
        for (w = old_weak_ptr_list; w != NULL; w = next_w) {

            /* There might be a DEAD_WEAK on the list if finalizeWeak# was
             * called on a live weak pointer object.  Just remove it.
             */
            if (w->header.info == &stg_DEAD_WEAK_info) {
                next_w = w->link;
                *last_w = next_w;
                continue;
            }

            info = get_itbl((StgClosure *)w);
            switch (info->type) {

            case WEAK:
                /* Now, check whether the key is reachable.
                 */
                new = isAlive(w->key);
                if (new != NULL) {
                    w->key = new;
                    // evacuate the value and finalizer
                    evacuate(&w->value);
                    evacuate(&w->finalizer);
                    // remove this weak ptr from the old_weak_ptr list
                    *last_w = w->link;
                    next_w = w->link;

                    // and put it on the new weak ptr list.
                    if (weak_ptr_list == NULL) {
                        weak_ptr_list = w;
                    } else {
                        weak_ptr_list_tail->link = w;
                    }
                    weak_ptr_list_tail = w;
                    w->link = NULL;
                    flag = rtsTrue;

                    debugTrace(DEBUG_weak,
                               "weak pointer still alive at %p -> %p",
                               w, w->key);
                    continue;
                }
                else {
                    last_w = &(w->link);
                    next_w = w->link;
                    continue;
                }

            default:
                barf("traverseWeakPtrList: not WEAK");
            }
        }

        /* If we didn't make any changes, then we can go round and kill all
         * the dead weak pointers.  The old_weak_ptr list is used as a list
         * of pending finalizers later on.
         */
        if (flag == rtsFalse) {
            for (w = old_weak_ptr_list; w; w = w->link) {
                evacuate(&w->finalizer);
            }

            // Next, move to the WeakThreads stage after fully
            // scavenging the finalizers we've just evacuated.
            weak_stage = WeakThreads;
        }

        return rtsTrue;

    case WeakThreads:
        /* Now deal with the gen->threads lists, which behave somewhat like
         * the weak ptr list.  If we discover any threads that are about to
         * become garbage, we wake them up and administer an exception.
         */
    {
        nat g;

        // Traverse thread lists for generations we collected...
        // ToDo when we have one gen per capability:
        //     for (n = 0; n < n_capabilities; n++) {
        //         if (tidyThreadList(&nurseries[n])) {
        //             flag = rtsTrue;
        //         }
        //     }
        for (g = 0; g <= N; g++) {
            if (tidyThreadList(&generations[g])) {
                flag = rtsTrue;
            }
        }

        /* If we evacuated any threads, we need to go back to the scavenger.
         */
        if (flag) return rtsTrue;

        /* And resurrect any threads which were about to become garbage.
         */
        {
            nat g;
            for (g = 0; g <= N; g++) {
                resurrectUnreachableThreads(&generations[g]);
            }
        }

        weak_stage = WeakDone;  // *now* we're done,
        return rtsTrue;         // but one more round of scavenging, please
    }

    default:
        barf("traverse_weak_ptr_list");
        return rtsTrue;
    }
}
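
/* A rough sketch of what happens to old_weak_ptr_list after the stages
 * above are finished (the real code lives in GC.c; it is shown here only
 * to make the data flow clearer): the entries still on old_weak_ptr_list
 * are exactly the weak pointers whose keys were found to be dead, and
 * their finalizers are scheduled once the collection has completed,
 * roughly
 *
 *     if (old_weak_ptr_list != NULL) {
 *         scheduleFinalizers(cap, old_weak_ptr_list);
 *     }
 *
 * This is why only the finalizers (not the keys or values) of dead weak
 * pointers are evacuated in the WeakPtrs stage.
 */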

static void resurrectUnreachableThreads (generation *gen)
{
    StgTSO *t, *tmp, *next;

    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
        next = t->global_link;

        // ThreadKilled and ThreadComplete: we have to keep
        // these on the all_threads list until they
        // become garbage, because they might get
        // pending exceptions.
        switch (t->what_next) {
        case ThreadKilled:
        case ThreadComplete:
            continue;
        default:
            tmp = t;
            evacuate((StgClosure **)&tmp);
            tmp->global_link = resurrected_threads;
            resurrected_threads = tmp;
        }
    }
}
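
/* Illustrative only (the real call site is elsewhere in the RTS): the
 * threads accumulated on resurrected_threads are handed back after the
 * collection, roughly
 *
 *     resurrectThreads(resurrected_threads);
 *
 * which wakes each such thread by raising an exception in it (e.g. a
 * BlockedIndefinitelyOnMVar-style exception), since a thread that is
 * reachable only from here can never be woken up normally.
 */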

static rtsBool tidyThreadList (generation *gen)
{
    StgTSO *t, *tmp, *next, **prev;
    rtsBool flag = rtsFalse;

    prev = &gen->old_threads;

    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {

        tmp = (StgTSO *)isAlive((StgClosure *)t);

        if (tmp != NULL) {
            t = tmp;
        }

        ASSERT(get_itbl((StgClosure *)t)->type == TSO);
        next = t->global_link;

        // if the thread is not masking exceptions but there are
        // pending exceptions on its queue, then something has gone
        // wrong.  However, pending exceptions are OK if there is an
        // FFI call.
        ASSERT(t->blocked_exceptions == END_BLOCKED_EXCEPTIONS_QUEUE
               || t->why_blocked == BlockedOnCCall
               || t->why_blocked == BlockedOnCCall_Interruptible
               || (t->flags & TSO_BLOCKEX));

        if (tmp == NULL) {
            // not alive (yet): leave this thread on the
            // old_threads list.
            prev = &(t->global_link);
        }
        else {
            // alive
            *prev = next;

            // move this thread onto the correct threads list.
            generation *new_gen;
            new_gen = Bdescr((P_)t)->gen;
            t->global_link = new_gen->threads;
            new_gen->threads = t;
        }
    }

    return flag;
}

/* -----------------------------------------------------------------------------
   Evacuate every weak pointer object on the weak_ptr_list, and update
   the link fields.

   ToDo: with a lot of weak pointers, this will be expensive.  We
   should have a per-GC weak pointer list, just like threads.
   -------------------------------------------------------------------------- */

void
markWeakPtrList ( void )
{
    StgWeak *w, **last_w;

    last_w = &weak_ptr_list;
    for (w = weak_ptr_list; w; w = w->link) {
        // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here

#ifdef DEBUG
        {   // careful to do this assertion only reading the info ptr
            // once, because during parallel GC it might change under our feet.
            const StgInfoTable *info;
            info = w->header.info;
            ASSERT(IS_FORWARDING_PTR(info)
                   || info == &stg_DEAD_WEAK_info
                   || INFO_PTR_TO_STRUCT(info)->type == WEAK);
        }
#endif

        evacuate((StgClosure **)last_w);
        w = *last_w;
        // w may be a live WEAK or a DEAD_WEAK here (see the assertion
        // above); either way we just keep following the link field.
        // Dead entries are removed elsewhere, not during this pass.
        last_w = &(w->link);
    }
}