Remove CONSTR_CHARLIKE and CONSTR_INTLIKE closure types
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2001
 * Author: Sungwoo Park
 *
 * Lag/Drag/Void profiling.
 *
 * ---------------------------------------------------------------------------*/

#ifdef PROFILING

#include "Rts.h"
#include "LdvProfile.h"
#include "RtsFlags.h"
#include "Profiling.h"
#include "Stats.h"
#include "Storage.h"
#include "RtsUtils.h"
#include "Schedule.h"

/* --------------------------------------------------------------------------
 * Fills in the slop when a *dynamic* closure changes its type.
 * First calls LDV_recordDead() to declare that the closure is dead, and
 * then fills in the slop.
 *
 * Invoked when:
 * 1) blackholing: UPD_BH_UPDATABLE() and UPD_BH_SINGLE_ENTRY() (in
 *    includes/StgMacros.h), threadLazyBlackHole() and
 *    threadSqueezeStack() (in GC.c).
 * 2) updating with indirection closures: updateWithIndirection()
 *    and updateWithPermIndirection() (in Storage.h).
 *
 * LDV_recordDead_FILL_SLOP_DYNAMIC() is not called on 'inherently used'
 * closures such as TSO.  It is not called on PAP because PAP is not
 * updatable.
 * ----------------------------------------------------------------------- */
void
LDV_recordDead_FILL_SLOP_DYNAMIC( StgClosure *p )
{
    nat size, i;

#if defined(__GNUC__) && __GNUC__ < 3 && defined(DEBUG)
#error Please use gcc 3.0+ to compile this file with DEBUG; gcc < 3.0 miscompiles it
#endif

    if (era > 0) {
        // very like FILL_SLOP(), except that we call LDV_recordDead().
        size = closure_sizeW(p);

        LDV_recordDead((StgClosure *)(p), size);

        if (size > sizeofW(StgThunkHeader)) {
            for (i = 0; i < size - sizeofW(StgThunkHeader); i++) {
                ((StgThunk *)(p))->payload[i] = 0;
            }
        }
    }
}
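
/* A rough sketch of the expected call pattern at the sites listed above
 * ('p1' is just an illustrative name for the thunk being updated, not a
 * variable defined in this file):
 *
 *     LDV_recordDead_FILL_SLOP_DYNAMIC(p1);
 *     ... then overwrite p1 with the black hole / indirection ...
 *
 * i.e. the slop is recorded as dead and zeroed *before* the closure
 * changes type, so the heap walks below can skip it by looking for
 * zero words.
 */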

/* --------------------------------------------------------------------------
 * This function is eventually called on every object destroyed during
 * a garbage collection, whether it is a major garbage collection or
 * not.  If c is an 'inherently used' closure, nothing happens.  If c
 * is an ordinary closure, LDV_recordDead() is called on c with its
 * proper size, which excludes the profiling header portion of the
 * closure.  Returns the size of the closure, including the profiling
 * header portion, so that the caller can find the next closure.
 * ----------------------------------------------------------------------- */
STATIC_INLINE nat
processHeapClosureForDead( StgClosure *c )
{
    nat size;
    StgInfoTable *info;

    info = get_itbl(c);

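    // Sanity check the LDV word of a live (non-evacuated) closure: its
    // creation era must lie in (0, era], and it must either still be in
    // the 'create' state or carry a valid last-use era.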
    if (info->type != EVACUATED) {
        ASSERT(((LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT) <= era &&
               ((LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT) > 0);
        ASSERT(((LDVW(c) & LDV_STATE_MASK) == LDV_STATE_CREATE) ||
               (
                   (LDVW(c) & LDV_LAST_MASK) <= era &&
                   (LDVW(c) & LDV_LAST_MASK) > 0
               ));
    }

    if (info->type == EVACUATED) {
        // The size of the evacuated closure is currently stored in
        // the LDV field.  See SET_EVACUAEE_FOR_LDV() in
        // includes/StgLdvProf.h.
        return LDVW(c);
    }
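
    // c was not evacuated, i.e. it is dead: work out its size and,
    // unless it is an 'inherently used' closure, record its death below.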

    size = closure_sizeW(c);

    switch (info->type) {
        /*
          'inherently used' cases: do nothing.
        */
    case TSO:
    case MVAR:
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    case ARR_WORDS:
    case WEAK:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case BCO:
    case STABLE_NAME:
    case TVAR_WAIT_QUEUE:
    case TVAR:
    case TREC_HEADER:
    case TREC_CHUNK:
        return size;

        /*
          ordinary cases: call LDV_recordDead().
        */
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_SELECTOR:
    case THUNK_2_0:
    case THUNK_1_1:
    case THUNK_0_2:
    case AP:
    case PAP:
    case AP_STACK:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_2_0:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_2_0:
    case FUN_1_1:
    case FUN_0_2:
    case BLACKHOLE:
    case SE_BLACKHOLE:
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case IND_PERM:
    case IND_OLDGEN_PERM:
        /*
          'Ignore' cases
        */
        // Why can we ignore IND/IND_OLDGEN closures? We assume that
        // any census is preceded by a major garbage collection, which
        // IND/IND_OLDGEN closures cannot survive.  Therefore, it is no
        // use considering IND/IND_OLDGEN closures in the meanwhile
        // because they will perish before the next census at any
        // rate.
    case IND:
    case IND_OLDGEN:
        // Found a dead closure: record its size
        LDV_recordDead(c, size);
        return size;

        /*
          Error case
        */
        // static objects
    case IND_STATIC:
    case CONSTR_STATIC:
    case FUN_STATIC:
    case THUNK_STATIC:
    case CONSTR_NOCAF_STATIC:
        // stack objects
    case UPDATE_FRAME:
    case CATCH_FRAME:
    case STOP_FRAME:
    case RET_DYN:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
        // others
    case BLOCKED_FETCH:
    case FETCH_ME:
    case FETCH_ME_BQ:
    case RBH:
    case REMOTE_REF:
    case INVALID_OBJECT:
    default:
        barf("Invalid object in processHeapClosureForDead(): %d", info->type);
        return 0;
    }
}

/* --------------------------------------------------------------------------
 * Calls processHeapClosureForDead() on every *dead* closure in the
 * heap blocks starting at bd.
 * ----------------------------------------------------------------------- */
static void
processHeapForDead( bdescr *bd )
{
    StgPtr p;

    while (bd != NULL) {
        p = bd->start;
        while (p < bd->free) {
            p += processHeapClosureForDead((StgClosure *)p);
            while (p < bd->free && !*p)   // skip slop
                p++;
        }
        ASSERT(p == bd->free);
        bd = bd->link;
    }
}

/* --------------------------------------------------------------------------
 * Calls processHeapClosureForDead() on every *dead* closure in the nursery.
 * ----------------------------------------------------------------------- */
static void
processNurseryForDead( void )
{
    StgPtr p, bdLimit;
    bdescr *bd;

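    // Only the nursery attached to MainCapability is scanned here;
    // LDV profiling is assumed to run with a single capability.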
    bd = MainCapability.r.rNursery->blocks;
    while (bd->start < bd->free) {
        p = bd->start;
        bdLimit = bd->start + BLOCK_SIZE_W;
        while (p < bd->free && p < bdLimit) {
            p += processHeapClosureForDead((StgClosure *)p);
            while (p < bd->free && p < bdLimit && !*p)   // skip slop
                p++;
        }
        bd = bd->link;
        if (bd == NULL)
            break;
    }
}

/* --------------------------------------------------------------------------
 * Calls processHeapClosureForDead() on every *dead* closure in the
 * small object pool.
 * ----------------------------------------------------------------------- */
static void
processSmallObjectPoolForDead( void )
{
    bdescr *bd;
    StgPtr p;

    bd = small_alloc_list;

    // first block
    if (bd == NULL)
        return;
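
    // The first block on small_alloc_list is presumably the one still
    // being allocated into, so it is walked only up to the current
    // allocation pointer alloc_Hp rather than bd->free.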

    p = bd->start;
    while (p < alloc_Hp) {
        p += processHeapClosureForDead((StgClosure *)p);
        while (p < alloc_Hp && !*p)   // skip slop
            p++;
    }
    ASSERT(p == alloc_Hp);

    bd = bd->link;
    while (bd != NULL) {
        p = bd->start;
        while (p < bd->free) {
            p += processHeapClosureForDead((StgClosure *)p);
            while (p < bd->free && !*p)   // skip slop
                p++;
        }
        ASSERT(p == bd->free);
        bd = bd->link;
    }
}

/* --------------------------------------------------------------------------
 * Calls processHeapClosureForDead() on every *dead* closure in the closure
 * chain.
 * ----------------------------------------------------------------------- */
static void
processChainForDead( bdescr *bd )
{
    // Any object still in the chain is dead!
    while (bd != NULL) {
        processHeapClosureForDead((StgClosure *)bd->start);
        bd = bd->link;
    }
}

/* --------------------------------------------------------------------------
 * Starts a census for *dead* closures, and calls
 * processHeapClosureForDead() on every closure that died in the
 * current garbage collection.  This function is called from a garbage
 * collector right before tidying up, when all dead closures are still
 * stored in the heap and easy to identify.  Generations 0 through N
 * have just been garbage collected.
 * ----------------------------------------------------------------------- */
void
LdvCensusForDead( nat N )
{
    nat g, s;

    // era == 0 means that LDV profiling is currently turned off.
    if (era == 0)
        return;

    if (RtsFlags.GcFlags.generations == 1) {
        //
        // Todo: support LDV for two-space garbage collection.
        //
        barf("Lag/Drag/Void profiling not supported with -G1");
    } else {
        for (g = 0; g <= N; g++)
            for (s = 0; s < generations[g].n_steps; s++) {
                if (g == 0 && s == 0) {
                    processSmallObjectPoolForDead();
                    processNurseryForDead();
                    processChainForDead(generations[g].steps[s].large_objects);
                } else {
                    processHeapForDead(generations[g].steps[s].old_blocks);
                    processChainForDead(generations[g].steps[s].large_objects);
                }
            }
    }
}

/* --------------------------------------------------------------------------
 * Regards any closure in the current heap as dead or moribund and updates
 * LDV statistics accordingly.
 * Called from shutdownHaskell() in RtsStartup.c.
 * Also, stops LDV profiling by resetting era to 0.
 * ----------------------------------------------------------------------- */
void
LdvCensusKillAll( void )
{
    LdvCensusForDead(RtsFlags.GcFlags.generations - 1);
}

#endif /* PROFILING */