/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2013-
 *
 * Check whether dynamically-loaded object code can be safely
 * unloaded, by searching for references to it from the heap and RTS
 * data structures.
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "RtsUtils.h"
#include "Hash.h"
#include "LinkerInternals.h"
#include "CheckUnload.h"
#include "sm/Storage.h"
#include "sm/GCThread.h"

//
// Code that we unload may be referenced from:
//   - info pointers in heap objects and stack frames
//   - pointers to static objects from the heap
//   - StablePtrs to static objects
//
// We can find live static objects after a major GC, so we don't have
// to look at every closure pointer in the heap.  However, we do have
// to look at every info pointer.  So this is like a heap census
// traversal: we look at the header of every object, but not its
// contents.
//
// On the assumption that there aren't many different info pointers in
// a typical heap, we insert addresses into a hash table.  The
// first time we see an address, we check it against the pending
// unloadable objects and if it lies within any of them, we mark that
// object as referenced so that it won't get unloaded in this round.
//

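// Check a single address: the first time we see 'addr', test whether it
// falls inside the image of any object pending unload, and if so mark
// that object as still referenced.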
static void checkAddress (HashTable *addrs, void *addr)
{
    ObjectCode *oc;

    if (!lookupHashTable(addrs, (W_)addr)) {
        insertHashTable(addrs, (W_)addr, addr);

        for (oc = unloaded_objects; oc; oc = oc->next) {
            if ((W_)addr >= (W_)oc->image &&
                (W_)addr <  (W_)oc->image + oc->fileSize) {
                oc->referenced = 1;
                break;
            }
        }
    }
}

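// Walk the stack frames between 'sp' and 'stack_end', checking the info
// pointer of each RET_SMALL/RET_BIG frame; return addresses may point
// into code that is about to be unloaded.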
static void searchStackChunk (HashTable *addrs, StgPtr sp, StgPtr stack_end)
{
    StgPtr p;
    const StgRetInfoTable *info;

    p = sp;
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case RET_SMALL:
        case RET_BIG:
            checkAddress(addrs, (void*)info);
            break;

        default:
            break;
        }

        p += stack_frame_sizeW((StgClosure*)p);
    }
}


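// Census-style traversal of a chain of heap blocks: step over each
// object, computing its size from its info table, and check the info
// pointers of objects whose code may live in unloadable object files.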
static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
{
    StgPtr p;
    StgInfoTable *info;
    nat size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        if (bd->flags & BF_PINNED) {
            // Assume that objects in PINNED blocks cannot refer to
            // the object code that we are trying to unload.
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND_PERM:
            case BLACKHOLE:
            case BLOCKING_QUEUE:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                prim = rtsTrue;
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case TVAR:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                prim = rtsTrue;
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                prim = rtsTrue;
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
            {
                StgAP_STACK *ap = (StgAP_STACK *)p;
                prim = rtsTrue;
                size = ap_stack_sizeW(ap);
                searchStackChunk(addrs, (StgPtr)ap->payload,
                                 (StgPtr)ap->payload + ap->size);
                break;
            }

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW((StgArrWords*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case SMALL_MUT_ARR_PTRS_CLEAN:
            case SMALL_MUT_ARR_PTRS_DIRTY:
            case SMALL_MUT_ARR_PTRS_FROZEN:
            case SMALL_MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
                size = sizeofW(StgTSO);
                break;

            case STACK: {
                StgStack *stack = (StgStack*)p;
                prim = rtsTrue;
                searchStackChunk(addrs, stack->sp,
                                 stack->stack + stack->stack_size);
                size = stack_sizeW(stack);
                break;
            }

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("searchHeapBlocks, unknown object: %d", info->type);
            }

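            // Objects flagged as 'prim' above use info tables that live
            // in the RTS itself rather than in loaded object code, so
            // only the remaining objects need their info pointer checked.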
            if (!prim) {
                checkAddress(addrs, info);
            }

            p += size;
        }
    }
}

//
// Check whether we can unload any object code.  This is called at the
// appropriate point during a GC, where all the heap data is nice and
// packed together and we have a linked list of the static objects.
//
// The check involves a complete heap traversal, but you only pay for
// this (a) when you have called unloadObj(), and (b) at a major GC,
// which is much more expensive than the traversal we're doing here.
//
void checkUnload (StgClosure *static_objects)
{
    nat g, n;
    HashTable *addrs;
    StgClosure* p;
    const StgInfoTable *info;
    ObjectCode *oc, *prev, *next;
    gen_workspace *ws;
    StgClosure* link;

    if (unloaded_objects == NULL) return;

    // Mark every unloadable object as unreferenced initially
    for (oc = unloaded_objects; oc; oc = oc->next) {
        IF_DEBUG(linker, debugBelch("Checking whether to unload %" PATH_FMT "\n",
                                    oc->fileName));
        oc->referenced = rtsFalse;
    }

    addrs = allocHashTable();

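    // Every static object that survived the GC is on the static_objects
    // list; any unloadable object whose image contains one of them is
    // still in use.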
    for (p = static_objects; p != END_OF_STATIC_LIST; p = link) {
        checkAddress(addrs, p);
        info = get_itbl(p);
        link = *STATIC_LINK(info, p);
    }

    // CAFs on revertible_caf_list are not on static_objects
    for (p = (StgClosure*)revertible_caf_list;
         p != END_OF_STATIC_LIST;
         p = ((StgIndStatic *)p)->static_link) {
        checkAddress(addrs, p);
    }

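    // Search all generations, including the GC threads' workspaces
    // (todo, partial and scavenged block lists), which can still hold
    // live heap objects at this point in the GC.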
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        searchHeapBlocks (addrs, generations[g].blocks);
        searchHeapBlocks (addrs, generations[g].large_objects);

        for (n = 0; n < n_capabilities; n++) {
            ws = &gc_threads[n]->gens[g];
            searchHeapBlocks(addrs, ws->todo_bd);
            searchHeapBlocks(addrs, ws->part_list);
            searchHeapBlocks(addrs, ws->scavd_list);
        }
    }

    // Look through the unloadable objects: any that are still marked as
    // unreferenced can be physically unloaded, because we found no
    // references to them.
    prev = NULL;
    for (oc = unloaded_objects; oc; oc = next) {
        next = oc->next;
        if (oc->referenced == 0) {
            if (prev == NULL) {
                unloaded_objects = oc->next;
            } else {
                prev->next = oc->next;
            }
            IF_DEBUG(linker, debugBelch("Unloading object file %" PATH_FMT "\n",
                                        oc->fileName));
            freeObjectCode(oc);
        } else {
            IF_DEBUG(linker, debugBelch("Object file still in use: %"
                                        PATH_FMT "\n", oc->fileName));
            prev = oc;
        }
    }

    freeHashTable(addrs, NULL);
}