testsuite: Assert that testsuite ways are known
[ghc.git] / rts / Weak.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-1999
4 *
5 * Weak pointers / finalizers
6 *
7 * ---------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11 #include "RtsAPI.h"
12
13 #include "RtsUtils.h"
14 #include "Weak.h"
15 #include "Schedule.h"
16 #include "Prelude.h"
17 #include "ThreadLabels.h"
18 #include "Trace.h"
19
20 // List of dead weak pointers collected by the last GC
21 static StgWeak *finalizer_list = NULL;
22
23 // Count of the above list.
24 static uint32_t n_finalizers = 0;
25
26 void
27 runCFinalizers(StgCFinalizerList *list)
28 {
29 StgCFinalizerList *head;
30 for (head = list;
31 (StgClosure *)head != &stg_NO_FINALIZER_closure;
32 head = (StgCFinalizerList *)head->link)
33 {
34 if (head->flag)
35 ((void (*)(void *, void *))head->fptr)(head->eptr, head->ptr);
36 else
37 ((void (*)(void *))head->fptr)(head->ptr);
38 }
39 }
40
/*
 * Run the C finalizers of every weak pointer on `list`, skipping weak
 * pointers that have already been marked DEAD_WEAK.  Used on the whole
 * weak-pointer list when shutting down (see the #7170 note below).
 *
 * While finalizers run, the current Task's `running_finalizers` flag is
 * set; NOTE(review): presumably this is consulted elsewhere in the RTS
 * (e.g. error handling) — confirm against Task.h users.
 */
void
runAllCFinalizers(StgWeak *list)
{
    StgWeak *w;
    Task *task;

    task = myTask();
    if (task != NULL) {
        task->running_finalizers = true;
    }

    for (w = list; w; w = w->link) {
        // We need to filter out DEAD_WEAK objects, because it's not guaranteed
        // that the list will not have them when shutting down.
        // They only get filtered out during GC for the generation they
        // belong to.
        // If there's no major GC between the time that the finalizer for the
        // object from the oldest generation is manually called and shutdown
        // we end up running the same finalizer twice. See #7170.
        //
        // Read the info pointer once, with a load-load barrier after it:
        // the info table must be observed before any fields it guards.
        const StgInfoTable *winfo = w->header.info;
        load_load_barrier();
        if (winfo != &stg_DEAD_WEAK_info) {
            runCFinalizers((StgCFinalizerList *)w->cfinalizers);
        }
    }

    if (task != NULL) {
        task->running_finalizers = false;
    }
}
71
72 /*
73 * scheduleFinalizers() is called on the list of weak pointers found
74 * to be dead after a garbage collection. It overwrites each object
75 * with DEAD_WEAK, and creates a new thread to run the pending finalizers.
76 *
77 * This function is called just after GC. The weak pointers on the
78 * argument list are those whose keys were found to be not reachable,
79 * however the value and finalizer fields have by now been marked live.
80 * The weak pointer object itself may not be alive - i.e. we may be
81 * looking at either an object in from-space or one in to-space. It
82 * doesn't really matter either way.
83 *
84 * Pre-condition: sched_mutex _not_ held.
85 */
86
/*
 * See the block comment above: called just after GC on the list of
 * weak pointers whose keys died.  Marks every weak pointer DEAD_WEAK,
 * queues the whole list for incremental C-finalizer processing
 * (finalizer_list / n_finalizers), and spawns one Haskell thread to
 * run all the Haskell finalizers as a batch.
 */
void
scheduleFinalizers(Capability *cap, StgWeak *list)
{
    StgWeak *w;
    StgTSO *t;
    StgMutArrPtrs *arr;
    StgWord size;
    uint32_t n, i;

    // The previous batch must be fully drained (runSomeFinalizers())
    // before a new one is installed.
    ASSERT(n_finalizers == 0);

    // Hand the whole list over to the incremental C-finalizer runner.
    finalizer_list = list;

    // Traverse the list and
    // * count the number of Haskell finalizers
    // * overwrite all the weak pointers with DEAD_WEAK
    n = 0;   // Haskell finalizers found
    i = 0;   // total list length
    for (w = list; w; w = w->link) {
        // Better not be a DEAD_WEAK at this stage; the garbage
        // collector removes DEAD_WEAKs from the weak pointer list.
        ASSERT(w->header.info != &stg_DEAD_WEAK_info);

        if (w->finalizer != &stg_NO_FINALIZER_closure) {
            n++;
        }

        // Remember the length of the list, for runSomeFinalizers() below
        i++;

#if defined(PROFILING)
        // A weak pointer is inherently used, so we do not need to call
        // LDV_recordDead().
        //
        // Furthermore, when PROFILING is turned on, dead weak
        // pointers are exactly as large as weak pointers, so there is
        // no need to fill the slop, either. See stg_DEAD_WEAK_info
        // in StgMiscClosures.cmm.
#endif

        // We must overwrite the header with DEAD_WEAK, so that if
        // there's a later call to finalizeWeak# on this weak pointer,
        // we don't run the finalizer again.
        SET_HDR(w, &stg_DEAD_WEAK_info, w->header.prof.ccs);
    }

    n_finalizers = i;

    // No Haskell finalizers to run?
    if (n == 0) return;

    debugTrace(DEBUG_weak, "weak: batching %d finalizers", n);

    // Allocate a frozen array holding the n finalizer closures plus the
    // card table required by the MUT_ARR_PTRS heap layout.
    size = n + mutArrPtrsCardTableSize(n);
    arr = (StgMutArrPtrs *)allocate(cap, sizeofW(StgMutArrPtrs) + size);
    TICK_ALLOC_PRIM(sizeofW(StgMutArrPtrs), n, 0);
    // No write barrier needed here; this array is only going to be
    // referred to by this core.
    SET_HDR(arr, &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info, CCS_SYSTEM);
    arr->ptrs = n;
    arr->size = size;

    // Second pass: copy the Haskell finalizer closures into the array.
    n = 0;
    for (w = list; w; w = w->link) {
        if (w->finalizer != &stg_NO_FINALIZER_closure) {
            arr->payload[n] = w->finalizer;
            n++;
        }
    }
    // set all the cards to 1
    for (i = n; i < size; i++) {
        arr->payload[i] = (StgClosure *)(W_)(-1);
    }

    // Spawn a Haskell thread applying runFinalizerBatch to the count
    // and the array: runFinalizerBatch n arr.
    t = createIOThread(cap,
                       RtsFlags.GcFlags.initialStkSize,
                       rts_apply(cap,
                                 rts_apply(cap,
                                           (StgClosure *)runFinalizerBatch_closure,
                                           rts_mkInt(cap,n)),
                                 (StgClosure *)arr)
        );

    scheduleThread(cap,t);
    labelThread(cap, t, "weak finalizer thread");
}
172
173 /* -----------------------------------------------------------------------------
174 Incrementally running C finalizers
175
176 The GC detects all the dead finalizers, but we don't want to run
177 them during the GC because that increases the time that the runtime
178 is paused.
179
180 What options are there?
181
182 1. Parallelise running the C finalizers across the GC threads
183 - doesn't solve the pause problem, just reduces it (maybe by a lot)
184
185 2. Make a Haskell thread to run the C finalizers, like we do for
186 Haskell finalizers.
187 + scheduling is handled for us
188 - no guarantee that we'll process finalizers in a timely manner
189
190 3. Run finalizers when any capability is idle.
191 + reduces pause to 0
192 - requires scheduler modifications
193 - if the runtime is busy, finalizers wait until the next GC
194
195 4. like (3), but also run finalizers incrementally between GCs.
196 - reduces the delay to run finalizers compared with (3)
197
198 For now we do (3). It would be easy to do (4) later by adding a
199 call to doIdleGCWork() in the scheduler loop, but I haven't found
200 that necessary so far.
201
202 -------------------------------------------------------------------------- */
203
204 // Run this many finalizers before returning from
205 // runSomeFinalizers(). This is so that we only tie up the capability
206 // for a short time, and respond quickly if new work becomes
207 // available.
208 static const int32_t finalizer_chunk = 100;
209
210 // non-zero if a thread is already in runSomeFinalizers(). This
211 // protects the globals finalizer_list and n_finalizers.
212 static volatile StgWord finalizer_lock = 0;
213
//
// Run some C finalizers from finalizer_list.  Returns true if there's
// more work to do.  If `all` is true, runs the entire remaining list;
// otherwise stops after finalizer_chunk finalizers so the capability
// is not tied up for long (strategy (3) in the comment above).
//
bool runSomeFinalizers(bool all)
{
    // Unlocked fast path: nothing queued, nothing to do.
    if (n_finalizers == 0)
        return false;

    if (cas(&finalizer_lock, 0, 1) != 0) {
        // another capability is doing the work, it's safe to say
        // there's nothing to do, because the thread already in
        // runSomeFinalizers() will call in again.
        return false;
    }

    debugTrace(DEBUG_sched, "running C finalizers, %d remaining", n_finalizers);

    // Flag this Task as running finalizers while we work.
    Task *task = myTask();
    if (task != NULL) {
        task->running_finalizers = true;
    }

    // Walk up to finalizer_chunk entries (or the whole list when `all`),
    // running each weak pointer's C finalizers.
    StgWeak *w = finalizer_list;
    int32_t count = 0;
    while (w != NULL) {
        runCFinalizers((StgCFinalizerList *)w->cfinalizers);
        w = w->link;
        ++count;
        if (!all && count >= finalizer_chunk) break;
    }

    // Record how far we got; the next caller resumes from here.
    finalizer_list = w;
    n_finalizers -= count;

    if (task != NULL) {
        task->running_finalizers = false;
    }

    debugTrace(DEBUG_sched, "ran %d C finalizers", count);

    // Publish the updated finalizer_list/n_finalizers before releasing
    // the lock, so the next locker sees a consistent pair.
    write_barrier();
    finalizer_lock = 0;

    return n_finalizers != 0;
}