rts: More const-correctness fixes
[ghc.git] / rts / CheckUnload.c
/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2013-
 *
 * Check whether dynamically-loaded object code can be safely
 * unloaded, by searching for references to it from the heap and RTS
 * data structures.
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "RtsUtils.h"
#include "Hash.h"
#include "LinkerInternals.h"
#include "CheckUnload.h"
#include "sm/Storage.h"
#include "sm/GCThread.h"

//
// Code that we unload may be referenced from:
//   - info pointers in heap objects and stack frames
//   - pointers to static objects from the heap
//   - StablePtrs to static objects
//   - pointers to cost centres from the cost centre tree
//
// We can find live static objects after a major GC, so we don't have
// to look at every closure pointer in the heap.  However, we do have
// to look at every info pointer.  So this is like a heap census
// traversal: we look at the header of every object, but not its
// contents.
//
// On the assumption that there aren't many different info pointers in
// a typical heap, we insert addresses into a hash table.  The first
// time we see an address, we check it against the pending unloadable
// objects and if it lies within any of them, we mark that object as
// referenced so that it won't get unloaded in this round.
//
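// Check one address against the pending unloadable objects: the first
// time we see an address we record it in the hash table, and if it
// falls within any section of an unloadable object we mark that object
// as referenced so it survives this round.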
static void checkAddress (HashTable *addrs, const void *addr)
{
    ObjectCode *oc;
    int i;

    if (!lookupHashTable(addrs, (W_)addr)) {
        insertHashTable(addrs, (W_)addr, addr);

        for (oc = unloaded_objects; oc; oc = oc->next) {
            for (i = 0; i < oc->n_sections; i++) {
                if (oc->sections[i].kind != SECTIONKIND_OTHER) {
                    if ((W_)addr >= (W_)oc->sections[i].start &&
                        (W_)addr <  (W_)oc->sections[i].start
                                    + oc->sections[i].size) {
                        oc->referenced = 1;
                        return;
                    }
                }
            }
        }
    }
}

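//
// Walk the frames of a stack chunk between sp and stack_end, checking
// the info pointer of each RET_SMALL/RET_BIG frame against the
// unloadable objects.
//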
static void searchStackChunk (HashTable *addrs, StgPtr sp, StgPtr stack_end)
{
    StgPtr p;
    const StgRetInfoTable *info;

    p = sp;
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case RET_SMALL:
        case RET_BIG:
            checkAddress(addrs, (const void*)info);
            break;

        default:
            break;
        }

        p += stack_frame_sizeW((StgClosure*)p);
    }
}

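//
// Walk every object in the given block list, computing each object's
// size from its info table so we can step to the next one, and checking
// the info pointer of every non-primitive object against the unloadable
// objects.  Stacks and AP_STACKs are additionally searched frame by
// frame.
//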
static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
{
    StgPtr p;
    const StgInfoTable *info;
    uint32_t size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        if (bd->flags & BF_PINNED) {
            // Assume that objects in PINNED blocks cannot refer to
            // unloadable objects.
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case BLACKHOLE:
            case BLOCKING_QUEUE:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                prim = rtsTrue;
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case TVAR:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                prim = rtsTrue;
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                prim = rtsTrue;
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
            {
                StgAP_STACK *ap = (StgAP_STACK *)p;
                prim = rtsTrue;
                size = ap_stack_sizeW(ap);
                searchStackChunk(addrs, (StgPtr)ap->payload,
                                 (StgPtr)ap->payload + ap->size);
                break;
            }

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW((StgArrBytes*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case SMALL_MUT_ARR_PTRS_CLEAN:
            case SMALL_MUT_ARR_PTRS_DIRTY:
            case SMALL_MUT_ARR_PTRS_FROZEN:
            case SMALL_MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
                size = sizeofW(StgTSO);
                break;

            case STACK: {
                StgStack *stack = (StgStack*)p;
                prim = rtsTrue;
                searchStackChunk(addrs, stack->sp,
                                 stack->stack + stack->stack_size);
                size = stack_sizeW(stack);
                break;
            }

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("searchHeapBlocks, unknown object: %d", info->type);
            }

            if (!prim) {
                checkAddress(addrs, info);
            }

            p += size;
        }
    }
}


#ifdef PROFILING
//
// Do not unload the object if the CCS tree refers to a CCS or CC which
// originates in the object.
//
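// The traversal follows each CCS's indexTable children, skipping back
// edges so that the recursion terminates on cyclic CCS graphs.
//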
static void searchCostCentres (HashTable *addrs, CostCentreStack *ccs)
{
    IndexTable *i;

    checkAddress(addrs, ccs);
    checkAddress(addrs, ccs->cc);
    for (i = ccs->indexTable; i != NULL; i = i->next) {
        if (!i->back_edge) {
            searchCostCentres(addrs, i->ccs);
        }
    }
}
#endif

//
// Check whether we can unload any object code.  This is called at the
// appropriate point during a GC, where all the heap data is nice and
// packed together and we have a linked list of the static objects.
//
// The check involves a complete heap traversal, but you only pay for
// this (a) when you have called unloadObj(), and (b) at a major GC,
// which is much more expensive than the traversal we're doing here.
//
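// For reference, a client of the RTS linker drives this machinery
// roughly as follows (a sketch only; error handling omitted):
//
//   loadObj("Foo.o");      // map the object file into memory
//   resolveObjs();         // resolve its symbols
//   ... look up and run code from Foo.o ...
//   unloadObj("Foo.o");    // moves the object onto unloaded_objects
//   performMajorGC();      // a later major GC calls checkUnload(),
//                          // which frees the object if nothing in the
//                          // heap or RTS still refers to it
//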
void checkUnload (StgClosure *static_objects)
{
    uint32_t g, n;
    HashTable *addrs;
    StgClosure* p;
    const StgInfoTable *info;
    ObjectCode *oc, *prev, *next;
    gen_workspace *ws;
    StgClosure* link;

    if (unloaded_objects == NULL) return;

    ACQUIRE_LOCK(&linker_unloaded_mutex);

    // Mark every unloadable object as unreferenced initially
    for (oc = unloaded_objects; oc; oc = oc->next) {
        IF_DEBUG(linker, debugBelch("Checking whether to unload %" PATH_FMT "\n",
                                    oc->fileName));
        oc->referenced = rtsFalse;
    }

    addrs = allocHashTable();

    for (p = static_objects; p != END_OF_STATIC_OBJECT_LIST; p = link) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkAddress(addrs, p);
        info = get_itbl(p);
        link = *STATIC_LINK(info, p);
    }

    // CAFs on revertible_caf_list are not on static_objects
    for (p = (StgClosure*)revertible_caf_list;
         p != END_OF_CAF_LIST;
         p = ((StgIndStatic *)p)->static_link) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkAddress(addrs, p);
    }

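    // Search the live heap: the block lists of every generation, plus
    // the blocks still held in each GC thread's workspace (todo,
    // partial and scavenged lists), which have not yet been moved back
    // onto the generation lists at this point in the GC.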
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        searchHeapBlocks (addrs, generations[g].blocks);
        searchHeapBlocks (addrs, generations[g].large_objects);

        for (n = 0; n < n_capabilities; n++) {
            ws = &gc_threads[n]->gens[g];
            searchHeapBlocks(addrs, ws->todo_bd);
            searchHeapBlocks(addrs, ws->part_list);
            searchHeapBlocks(addrs, ws->scavd_list);
        }
    }

#ifdef PROFILING
    /* Traverse the cost centre tree, calling checkAddress on each CCS/CC */
    searchCostCentres(addrs, CCS_MAIN);

    /* Also check each cost centre in the CC_LIST */
    CostCentre *cc;
    for (cc = CC_LIST; cc != NULL; cc = cc->link) {
        checkAddress(addrs, cc);
    }
#endif /* PROFILING */

    // Look through the unloadable objects, and any object that is still
    // marked as unreferenced can be physically unloaded, because we
    // have no references to it.
    prev = NULL;
    for (oc = unloaded_objects; oc; oc = next) {
        next = oc->next;
        if (oc->referenced == 0) {
            if (prev == NULL) {
                unloaded_objects = oc->next;
            } else {
                prev->next = oc->next;
            }
            IF_DEBUG(linker, debugBelch("Unloading object file %" PATH_FMT "\n",
                                        oc->fileName));
            freeObjectCode(oc);
        } else {
            IF_DEBUG(linker, debugBelch("Object file still in use: %"
                                        PATH_FMT "\n", oc->fileName));
            prev = oc;
        }
    }

    freeHashTable(addrs, NULL);

    RELEASE_LOCK(&linker_unloaded_mutex);
}