/* [ghc.git] / rts / Weak.c — captured at commit "Enable two-step allocator on FreeBSD" */
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-1999
4 *
5 * Weak pointers / finalizers
6 *
7 * ---------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11 #include "RtsAPI.h"
12
13 #include "RtsUtils.h"
14 #include "Weak.h"
15 #include "Schedule.h"
16 #include "Prelude.h"
17 #include "ThreadLabels.h"
18 #include "Trace.h"
19
// List of dead weak pointers collected by the last GC.  Their C
// finalizers are run incrementally by runSomeFinalizers(); outside of
// GC the list is protected by finalizer_lock (see below).
static StgWeak *finalizer_list = NULL;

// Count of the above list.  Kept in sync with finalizer_list under
// finalizer_lock; read without the lock only as a fast-path hint.
static uint32_t n_finalizers = 0;
25
26 void
27 runCFinalizers(StgCFinalizerList *list)
28 {
29 StgCFinalizerList *head;
30 for (head = list;
31 (StgClosure *)head != &stg_NO_FINALIZER_closure;
32 head = (StgCFinalizerList *)head->link)
33 {
34 if (head->flag)
35 ((void (*)(void *, void *))head->fptr)(head->eptr, head->ptr);
36 else
37 ((void (*)(void *))head->fptr)(head->ptr);
38 }
39 }
40
41 void
42 runAllCFinalizers(StgWeak *list)
43 {
44 StgWeak *w;
45 Task *task;
46
47 task = myTask();
48 if (task != NULL) {
49 task->running_finalizers = true;
50 }
51
52 for (w = list; w; w = w->link) {
53 // We need to filter out DEAD_WEAK objects, because it's not guaranteed
54 // that the list will not have them when shutting down.
55 // They only get filtered out during GC for the generation they
56 // belong to.
57 // If there's no major GC between the time that the finalizer for the
58 // object from the oldest generation is manually called and shutdown
59 // we end up running the same finalizer twice. See #7170.
60 if (w->header.info != &stg_DEAD_WEAK_info) {
61 runCFinalizers((StgCFinalizerList *)w->cfinalizers);
62 }
63 }
64
65 if (task != NULL) {
66 task->running_finalizers = false;
67 }
68 }
69
70 /*
71 * scheduleFinalizers() is called on the list of weak pointers found
72 * to be dead after a garbage collection. It overwrites each object
73 * with DEAD_WEAK, and creates a new thread to run the pending finalizers.
74 *
75 * This function is called just after GC. The weak pointers on the
76 * argument list are those whose keys were found to be not reachable,
77 * however the value and finalizer fields have by now been marked live.
78 * The weak pointer object itself may not be alive - i.e. we may be
79 * looking at either an object in from-space or one in to-space. It
80 * doesn't really matter either way.
81 *
82 * Pre-condition: sched_mutex _not_ held.
83 */
84
/*
 * Post-GC hook (see the comment block above): stash the dead weak
 * pointers for incremental C-finalizer running, mark them all
 * DEAD_WEAK, and spawn one Haskell thread to run the Haskell
 * finalizers in a batch.
 */
void
scheduleFinalizers(Capability *cap, StgWeak *list)
{
    StgWeak *w;
    StgTSO *t;
    StgMutArrPtrs *arr;
    StgWord size;
    uint32_t n, i;

    // All finalizers scheduled by the previous GC must have been run
    // by now (the list is drained by runSomeFinalizers()).
    ASSERT(n_finalizers == 0);

    // Publish the list for the incremental C-finalizer runner.
    finalizer_list = list;

    // Traverse the list and
    //  * count the number of Haskell finalizers
    //  * overwrite all the weak pointers with DEAD_WEAK
    n = 0;
    i = 0;
    for (w = list; w; w = w->link) {
        // Better not be a DEAD_WEAK at this stage; the garbage
        // collector removes DEAD_WEAKs from the weak pointer list.
        ASSERT(w->header.info != &stg_DEAD_WEAK_info);

        if (w->finalizer != &stg_NO_FINALIZER_closure) {
            n++;
        }

        // Remember the length of the list, for runSomeFinalizers() below
        i++;

#if defined(PROFILING)
        // A weak pointer is inherently used, so we do not need to call
        // LDV_recordDead().
        //
        // Furthermore, when PROFILING is turned on, dead weak
        // pointers are exactly as large as weak pointers, so there is
        // no need to fill the slop, either.  See stg_DEAD_WEAK_info
        // in StgMiscClosures.cmm.
#endif

        // We must overwrite the header with DEAD_WEAK, so that if
        // there's a later call to finalizeWeak# on this weak pointer,
        // we don't run the finalizer again.
        SET_HDR(w, &stg_DEAD_WEAK_info, w->header.prof.ccs);
    }

    n_finalizers = i;

    // No Haskell finalizers to run?
    if (n == 0) return;

    debugTrace(DEBUG_weak, "weak: batching %d finalizers", n);

    // Allocate a frozen array holding the n finalizer closures; the
    // extra (size - n) words are the card table required by the
    // MUT_ARR_PTRS heap layout.
    size = n + mutArrPtrsCardTableSize(n);
    arr = (StgMutArrPtrs *)allocate(cap, sizeofW(StgMutArrPtrs) + size);
    TICK_ALLOC_PRIM(sizeofW(StgMutArrPtrs), n, 0);
    SET_HDR(arr, &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info, CCS_SYSTEM);
    arr->ptrs = n;
    arr->size = size;

    // Second pass: copy the finalizer closures into the array.
    n = 0;
    for (w = list; w; w = w->link) {
        if (w->finalizer != &stg_NO_FINALIZER_closure) {
            arr->payload[n] = w->finalizer;
            n++;
        }
    }
    // set all the cards to 1
    for (i = n; i < size; i++) {
        arr->payload[i] = (StgClosure *)(W_)(-1);
    }

    // Build the application 'runFinalizerBatch n arr' and run it in a
    // fresh IO thread, scheduled like any other Haskell thread.
    t = createIOThread(cap,
                       RtsFlags.GcFlags.initialStkSize,
                       rts_apply(cap,
                           rts_apply(cap,
                               (StgClosure *)runFinalizerBatch_closure,
                               rts_mkInt(cap,n)),
                           (StgClosure *)arr)
                       );

    scheduleThread(cap,t);
    labelThread(cap, t, "weak finalizer thread");
}
169
170 /* -----------------------------------------------------------------------------
171 Incrementally running C finalizers
172
173 The GC detects all the dead finalizers, but we don't want to run
174 them during the GC because that increases the time that the runtime
175 is paused.
176
177 What options are there?
178
179 1. Parallelise running the C finalizers across the GC threads
180 - doesn't solve the pause problem, just reduces it (maybe by a lot)
181
182 2. Make a Haskell thread to run the C finalizers, like we do for
183 Haskell finalizers.
184 + scheduling is handled for us
185 - no guarantee that we'll process finalizers in a timely manner
186
187 3. Run finalizers when any capability is idle.
188 + reduces pause to 0
189 - requires scheduler modifications
190 - if the runtime is busy, finalizers wait until the next GC
191
192 4. like (3), but also run finalizers incrementally between GCs.
193 - reduces the delay to run finalizers compared with (3)
194
195 For now we do (3). It would be easy to do (4) later by adding a
196 call to doIdleGCWork() in the scheduler loop, but I haven't found
197 that necessary so far.
198
199 -------------------------------------------------------------------------- */
200
// Run this many finalizers before returning from
// runSomeFinalizers().  This is so that we only tie up the capability
// for a short time, and respond quickly if new work becomes
// available.
static const int32_t finalizer_chunk = 100;

// non-zero if a thread is already in runSomeFinalizers(). This
// protects the globals finalizer_list and n_finalizers.  Acquired
// with cas() and released with a plain store after a write_barrier()
// (see runSomeFinalizers below).
static volatile StgWord finalizer_lock = 0;
210
//
// Run some C finalizers.  Returns true if there's more work to do.
//
// When 'all' is true, run every remaining finalizer (shutdown-style
// draining); otherwise run at most finalizer_chunk of them so the
// capability is only tied up briefly.
//
bool runSomeFinalizers(bool all)
{
    // Unlocked fast-path peek at the count: a stale read just means
    // we return "no work" and get called again later.
    if (n_finalizers == 0)
        return false;

    if (cas(&finalizer_lock, 0, 1) != 0) {
        // another capability is doing the work, it's safe to say
        // there's nothing to do, because the thread already in
        // runSomeFinalizers() will call in again.
        return false;
    }

    debugTrace(DEBUG_sched, "running C finalizers, %d remaining", n_finalizers);

    // Mark this task as being inside finalizer code for the duration
    // (the flag's consumers live elsewhere in the RTS).
    Task *task = myTask();
    if (task != NULL) {
        task->running_finalizers = true;
    }

    // Run up to finalizer_chunk entries from the pending list, or all
    // of them when 'all' is set.
    StgWeak *w = finalizer_list;
    int32_t count = 0;
    while (w != NULL) {
        runCFinalizers((StgCFinalizerList *)w->cfinalizers);
        w = w->link;
        ++count;
        if (!all && count >= finalizer_chunk) break;
    }

    // Whatever remains becomes the new pending list.
    finalizer_list = w;
    n_finalizers -= count;

    if (task != NULL) {
        task->running_finalizers = false;
    }

    debugTrace(DEBUG_sched, "ran %d C finalizers", count);

    // Make the list/count updates visible to other capabilities
    // before releasing the lock.
    write_barrier();
    finalizer_lock = 0;

    return n_finalizers != 0;
}