rts/CheckUnload.c
/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2013-
 *
 * Check whether dynamically-loaded object code can be safely
 * unloaded, by searching for references to it from the heap and RTS
 * data structures.
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "RtsUtils.h"
#include "Hash.h"
#include "LinkerInternals.h"
#include "CheckUnload.h"
#include "sm/Storage.h"
#include "sm/GCThread.h"

//
// Code that we unload may be referenced from:
//   - info pointers in heap objects and stack frames
//   - pointers to static objects from the heap
//   - StablePtrs to static objects
//
// We can find live static objects after a major GC, so we don't have
// to look at every closure pointer in the heap.  However, we do have
// to look at every info pointer.  So this is like a heap census
// traversal: we look at the header of every object, but not its
// contents.
//
// On the assumption that there aren't many different info pointers in
// a typical heap, we insert addresses into a hash table.  The
// first time we see an address, we check it against the pending
// unloadable objects and if it lies within any of them, we mark that
// object as referenced so that it won't get unloaded in this round.
//

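// Check a single address against the pending unloadable objects: the
// first time we see an address we record it in 'addrs', and if it
// falls within any section of an unloadable object we mark that
// object as referenced.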
static void checkAddress (HashTable *addrs, void *addr)
{
    ObjectCode *oc;
    int i;

    if (!lookupHashTable(addrs, (W_)addr)) {
        insertHashTable(addrs, (W_)addr, addr);

        for (oc = unloaded_objects; oc; oc = oc->next) {
            for (i = 0; i < oc->n_sections; i++) {
                if (oc->sections[i].kind != SECTIONKIND_OTHER) {
                    if ((W_)addr >= (W_)oc->sections[i].start &&
                        (W_)addr <  (W_)oc->sections[i].start
                                  + oc->sections[i].size) {
                        oc->referenced = rtsTrue;
                        return;
                    }
                }
            }
        }
    }
}

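// Traverse a stack chunk, checking the info pointer of every
// RET_SMALL and RET_BIG frame: these are the return addresses that
// may point into compiled, and hence unloadable, code.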
static void searchStackChunk (HashTable *addrs, StgPtr sp, StgPtr stack_end)
{
    StgPtr p;
    const StgRetInfoTable *info;

    p = sp;
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case RET_SMALL:
        case RET_BIG:
            checkAddress(addrs, (void*)info);
            break;

        default:
            break;
        }

        p += stack_frame_sizeW((StgClosure*)p);
    }
}


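// Walk every object in the given list of heap blocks, as a heap
// census would: look at each object's header, compute its size, and
// step to the next object.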
static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
{
    StgPtr p;
    StgInfoTable *info;
    nat size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        if (bd->flags & BF_PINNED) {
            // Assume that objects in PINNED blocks cannot refer to
            // unloadable code: pinned blocks hold only ARR_WORDS
            // objects, which contain no pointers.
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

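            // Each case must set 'size' (in words) so that we can
            // step to the next object, and must set 'prim' for
            // objects whose info tables live in the RTS itself and
            // therefore cannot point into unloadable code.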
            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND_PERM:
            case BLACKHOLE:
            case BLOCKING_QUEUE:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                prim = rtsTrue;
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case TVAR:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                prim = rtsTrue;
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                prim = rtsTrue;
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
            {
                StgAP_STACK *ap = (StgAP_STACK *)p;
                prim = rtsTrue;
                size = ap_stack_sizeW(ap);
                searchStackChunk(addrs, (StgPtr)ap->payload,
                                 (StgPtr)ap->payload + ap->size);
                break;
            }

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW((StgArrBytes*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case SMALL_MUT_ARR_PTRS_CLEAN:
            case SMALL_MUT_ARR_PTRS_DIRTY:
            case SMALL_MUT_ARR_PTRS_FROZEN:
            case SMALL_MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
                size = sizeofW(StgTSO);
                break;

            case STACK: {
                StgStack *stack = (StgStack*)p;
                prim = rtsTrue;
                searchStackChunk(addrs, stack->sp,
                                 stack->stack + stack->stack_size);
                size = stack_sizeW(stack);
                break;
            }

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("searchHeapBlocks, unknown object: %d", info->type);
            }

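            // Objects whose info tables are part of the RTS can
            // never keep unloadable code alive, so only check the
            // others.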
            if (!prim) {
                checkAddress(addrs, info);
            }

            p += size;
        }
    }
}

//
// Check whether we can unload any object code.  This is called at the
// appropriate point during a GC, where all the heap data is nice and
// packed together and we have a linked list of the static objects.
//
// The check involves a complete heap traversal, but you only pay for
// this (a) when you have called unloadObj(), and (b) at a major GC,
// which is much more expensive than the traversal we're doing here.
//
void checkUnload (StgClosure *static_objects)
{
    nat g, n;
    HashTable *addrs;
    StgClosure* p;
    const StgInfoTable *info;
    ObjectCode *oc, *prev, *next;
    gen_workspace *ws;
    StgClosure* link;

    if (unloaded_objects == NULL) return;

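    // Hold linker_unloaded_mutex for the whole scan, so that the
    // unloaded_objects list cannot change under us.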
    ACQUIRE_LOCK(&linker_unloaded_mutex);

    // Mark every unloadable object as unreferenced initially
    for (oc = unloaded_objects; oc; oc = oc->next) {
        IF_DEBUG(linker, debugBelch("Checking whether to unload %" PATH_FMT "\n",
                                    oc->fileName));
        oc->referenced = rtsFalse;
    }

    addrs = allocHashTable();

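    // After a major GC, every live static object is on the
    // static_objects list; each one's address may lie inside an
    // unloadable object, so check them all.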
    for (p = static_objects; p != END_OF_STATIC_OBJECT_LIST; p = link) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkAddress(addrs, p);
        info = get_itbl(p);
        link = *STATIC_LINK(info, p);
    }

    // CAFs on revertible_caf_list are not on static_objects
    for (p = (StgClosure*)revertible_caf_list;
         p != END_OF_CAF_LIST;
         p = ((StgIndStatic *)p)->static_link) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkAddress(addrs, p);
    }

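    // Scan the whole heap: the blocks owned by each generation, plus
    // the blocks still held in the GC threads' workspaces, which are
    // not yet linked onto the generation lists.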
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        searchHeapBlocks (addrs, generations[g].blocks);
        searchHeapBlocks (addrs, generations[g].large_objects);

        for (n = 0; n < n_capabilities; n++) {
            ws = &gc_threads[n]->gens[g];
            searchHeapBlocks(addrs, ws->todo_bd);
            searchHeapBlocks(addrs, ws->part_list);
            searchHeapBlocks(addrs, ws->scavd_list);
        }
    }

    // Sweep the unloadable objects: any object that is still
    // unreferenced at this point has no references from the heap or
    // the RTS, so it can be physically unloaded.
    prev = NULL;
    for (oc = unloaded_objects; oc; oc = next) {
        next = oc->next;
        if (oc->referenced == 0) {
            if (prev == NULL) {
                unloaded_objects = oc->next;
            } else {
                prev->next = oc->next;
            }
            IF_DEBUG(linker, debugBelch("Unloading object file %" PATH_FMT "\n",
                                        oc->fileName));
            freeObjectCode(oc);
        } else {
            IF_DEBUG(linker, debugBelch("Object file still in use: %"
                                        PATH_FMT "\n", oc->fileName));
            prev = oc;
        }
    }

    freeHashTable(addrs, NULL);

    RELEASE_LOCK(&linker_unloaded_mutex);
}