rts/CheckUnload.c (ghc.git)
/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2013-
 *
 * Check whether dynamically-loaded object code can be safely
 * unloaded, by searching for references to it from the heap and RTS
 * data structures.
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "RtsUtils.h"
#include "Hash.h"
#include "LinkerInternals.h"
#include "CheckUnload.h"
#include "sm/Storage.h"
#include "sm/GCThread.h"

//
// Code that we unload may be referenced from:
//   - info pointers in heap objects and stack frames
//   - pointers to static objects from the heap
//   - StablePtrs to static objects
//   - pointers to cost centres from the cost centre tree
//
// We can find live static objects after a major GC, so we don't have
// to look at every closure pointer in the heap. However, we do have
// to look at every info pointer. So this is like a heap census
// traversal: we look at the header of every object, but not its
// contents.
//
// On the assumption that there aren't many different info pointers in
// a typical heap, we insert addresses into a hash table. The
// first time we see an address, we check it against the pending
// unloadable objects and if it lies within any of them, we mark that
// object as referenced so that it won't get unloaded in this round.
//

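// checkAddress: record a single address in the hash table. The first
// time we see an address, we check whether it falls inside any section
// of a pending-unload object; if it does, that object is marked as
// referenced and will survive this round.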
static void checkAddress (HashTable *addrs, const void *addr)
{
    ObjectCode *oc;
    int i;

    if (!lookupHashTable(addrs, (W_)addr)) {
        insertHashTable(addrs, (W_)addr, addr);

        for (oc = unloaded_objects; oc; oc = oc->next) {
            for (i = 0; i < oc->n_sections; i++) {
                if (oc->sections[i].kind != SECTIONKIND_OTHER) {
                    if ((W_)addr >= (W_)oc->sections[i].start &&
                        (W_)addr <  (W_)oc->sections[i].start
                                    + oc->sections[i].size) {
                        oc->referenced = true;
                        return;
                    }
                }
            }
        }
    }
}

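// Walk the stack frames between sp and stack_end, calling checkAddress
// on the return-address info pointer of each RET_SMALL and RET_BIG
// frame; other frame types are skipped.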
static void searchStackChunk (HashTable *addrs, StgPtr sp, StgPtr stack_end)
{
    StgPtr p;
    const StgRetInfoTable *info;

    p = sp;
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case RET_SMALL:
        case RET_BIG:
            checkAddress(addrs, (const void*)info);
            break;

        default:
            break;
        }

        p += stack_frame_sizeW((StgClosure*)p);
    }
}

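// Scan every object in the given block list. We step from object to
// object using the size computed from each info table, and call
// checkAddress on the info pointer of objects that could have been
// compiled into unloadable code. Objects flagged as prim have info
// tables that live in the RTS itself, so their headers are not
// checked, but the stack chunks inside AP_STACK and STACK closures
// are still searched for frame info pointers.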
static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
{
    StgPtr p;
    const StgInfoTable *info;
    uint32_t size;
    bool prim;

    for (; bd != NULL; bd = bd->link) {

        if (bd->flags & BF_PINNED) {
            // Assume that objects in PINNED blocks cannot refer to code
            // we might unload (they are typically pointer-free ARR_WORDS).
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = false;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case FUN:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR:
            case CONSTR_NOCAF:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case BLACKHOLE:
            case BLOCKING_QUEUE:
                prim = true;
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC. However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads. So we know that any IND will
                // be the size of a BLACKHOLE.
                prim = true;
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = true;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case TVAR:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = true;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                prim = true;
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                prim = true;
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
            {
                StgAP_STACK *ap = (StgAP_STACK *)p;
                prim = true;
                size = ap_stack_sizeW(ap);
                searchStackChunk(addrs, (StgPtr)ap->payload,
                                 (StgPtr)ap->payload + ap->size);
                break;
            }

            case ARR_WORDS:
                prim = true;
                size = arr_words_sizeW((StgArrBytes*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = true;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case SMALL_MUT_ARR_PTRS_CLEAN:
            case SMALL_MUT_ARR_PTRS_DIRTY:
            case SMALL_MUT_ARR_PTRS_FROZEN:
            case SMALL_MUT_ARR_PTRS_FROZEN0:
                prim = true;
                size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
                break;

            case TSO:
                prim = true;
                size = sizeofW(StgTSO);
                break;

            case STACK: {
                StgStack *stack = (StgStack*)p;
                prim = true;
                searchStackChunk(addrs, stack->sp,
                                 stack->stack + stack->stack_size);
                size = stack_sizeW(stack);
                break;
            }

            case TREC_CHUNK:
                prim = true;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("searchHeapBlocks, unknown object: %d", info->type);
            }

            if (!prim) {
                checkAddress(addrs, info);
            }

            p += size;
        }
    }
}

#if defined(PROFILING)
//
// Do not unload the object if the CCS tree refers to a CCS or CC which
// originates in the object.
//
static void searchCostCentres (HashTable *addrs, CostCentreStack *ccs)
{
    IndexTable *i;

    checkAddress(addrs, ccs);
    checkAddress(addrs, ccs->cc);
    for (i = ccs->indexTable; i != NULL; i = i->next) {
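        // don't follow back edges, or this recursion would loop forever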
        if (!i->back_edge) {
            searchCostCentres(addrs, i->ccs);
        }
    }
}
#endif

//
// Check whether we can unload any object code. This is called at the
// appropriate point during a GC, where all the heap data is nice and
// packed together and we have a linked list of the static objects.
//
// The check involves a complete heap traversal, but you only pay for
// this (a) when you have called unloadObj(), and (b) at a major GC,
// which is much more expensive than the traversal we're doing here.
//
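// The check proceeds in three steps: every pending object is first
// marked as unreferenced; then checkAddress is called on every
// reachable static closure, info pointer and (when profiling) cost
// centre; finally, any object that is still unmarked is freed.
//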
void checkUnload (StgClosure *static_objects)
{
    uint32_t g, n;
    HashTable *addrs;
    StgClosure* p;
    const StgInfoTable *info;
    ObjectCode *oc, *prev, *next;
    gen_workspace *ws;
    StgClosure* link;

    if (unloaded_objects == NULL) return;

    ACQUIRE_LOCK(&linker_unloaded_mutex);

    // Mark every unloadable object as unreferenced initially
    for (oc = unloaded_objects; oc; oc = oc->next) {
        IF_DEBUG(linker, debugBelch("Checking whether to unload %" PATH_FMT "\n",
                                    oc->fileName));
        oc->referenced = false;
    }

    addrs = allocHashTable();

    for (p = static_objects; p != END_OF_STATIC_OBJECT_LIST; p = link) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkAddress(addrs, p);
        info = get_itbl(p);
        link = *STATIC_LINK(info, p);
    }

    // CAFs on revertible_caf_list are not on static_objects
    for (p = (StgClosure*)revertible_caf_list;
         p != END_OF_CAF_LIST;
         p = ((StgIndStatic *)p)->static_link) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkAddress(addrs, p);
    }

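    // Scan the whole heap: each generation's ordinary and large-object
    // blocks, plus every GC thread's workspace blocks, which have not
    // yet been moved back onto the generation lists at this point in
    // the GC.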
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        searchHeapBlocks (addrs, generations[g].blocks);
        searchHeapBlocks (addrs, generations[g].large_objects);

        for (n = 0; n < n_capabilities; n++) {
            ws = &gc_threads[n]->gens[g];
            searchHeapBlocks(addrs, ws->todo_bd);
            searchHeapBlocks(addrs, ws->part_list);
            searchHeapBlocks(addrs, ws->scavd_list);
        }
    }

#if defined(PROFILING)
    /* Traverse the cost centre tree, calling checkAddress on each CCS/CC */
    searchCostCentres(addrs, CCS_MAIN);

    /* Also check each cost centre in the CC_LIST */
    CostCentre *cc;
    for (cc = CC_LIST; cc != NULL; cc = cc->link) {
        checkAddress(addrs, cc);
    }
#endif /* PROFILING */

    // Look through the unloadable objects, and any object that is still
    // marked as unreferenced can be physically unloaded, because we
    // have no references to it.
    prev = NULL;
    for (oc = unloaded_objects; oc; oc = next) {
        next = oc->next;
        if (!oc->referenced) {
            if (prev == NULL) {
                unloaded_objects = oc->next;
            } else {
                prev->next = oc->next;
            }
            IF_DEBUG(linker, debugBelch("Unloading object file %" PATH_FMT "\n",
                                        oc->fileName));
            freeObjectCode(oc);
        } else {
            IF_DEBUG(linker, debugBelch("Object file still in use: %"
                                        PATH_FMT "\n", oc->fileName));
            prev = oc;
        }
    }

    freeHashTable(addrs, NULL);

    RELEASE_LOCK(&linker_unloaded_mutex);
}