/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2001
 * Author: Sungwoo Park
 *
 * Lag/Drag/Void profiling.
 *
 * ---------------------------------------------------------------------------*/

#ifdef PROFILING

#include "PosixSource.h"
#include "Rts.h"

#include "Profiling.h"
#include "LdvProfile.h"
#include "Stats.h"
#include "RtsUtils.h"
#include "Schedule.h"

/* --------------------------------------------------------------------------
 * This function is eventually called on every object destroyed during a
 * garbage collection, whether it is a major collection or not.  If c is
 * an 'inherently used' closure, nothing happens.  If c is an ordinary
 * closure, LDV_recordDead() is called on c with its actual size, which
 * excludes the profiling header portion of the closure.  Returns the size
 * of the closure, including the profiling header portion, so that the
 * caller can find the next closure.
 * ----------------------------------------------------------------------- */
STATIC_INLINE nat
processHeapClosureForDead( StgClosure *c )
{
    nat size;
    const StgInfoTable *info;

    info = c->header.info;
    if (IS_FORWARDING_PTR(info)) {
        // The size of the evacuated closure is currently stored in
        // the LDV field.  See SET_EVACUAEE_FOR_LDV() in
        // includes/StgLdvProf.h.
        return LDVW(c);
    }
    info = INFO_PTR_TO_STRUCT(info);

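    // Sanity check on the LDV word (see includes/StgLdvProf.h): the creation
    // era recorded in the word (LDV_CREATE_MASK >> LDV_SHIFT) must lie in
    // [1, era], and unless the closure is still in its 'create' state, the
    // era of its last use (LDV_LAST_MASK) must lie in [1, era] as well.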
    ASSERT(((LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT) <= era &&
           ((LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT) > 0);
    ASSERT(((LDVW(c) & LDV_STATE_MASK) == LDV_STATE_CREATE) ||
           (
               (LDVW(c) & LDV_LAST_MASK) <= era &&
               (LDVW(c) & LDV_LAST_MASK) > 0
           ));

    size = closure_sizeW(c);

    switch (info->type) {
        /*
          'inherently used' cases: do nothing.
        */
    case TSO:
    case STACK:
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    case TVAR:
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    case SMALL_MUT_ARR_PTRS_CLEAN:
    case SMALL_MUT_ARR_PTRS_DIRTY:
    case SMALL_MUT_ARR_PTRS_FROZEN:
    case SMALL_MUT_ARR_PTRS_FROZEN0:
    case ARR_WORDS:
    case WEAK:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case BCO:
    case PRIM:
    case MUT_PRIM:
    case TREC_CHUNK:
        return size;

        /*
          ordinary cases: call LDV_recordDead().
        */
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_SELECTOR:
    case THUNK_2_0:
    case THUNK_1_1:
    case THUNK_0_2:
    case AP:
    case PAP:
    case AP_STACK:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_2_0:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_2_0:
    case FUN_1_1:
    case FUN_0_2:
    case BLACKHOLE:
    case BLOCKING_QUEUE:
    case IND_PERM:
        /*
          'Ignore' cases
        */
        // Why can we ignore IND closures?  We assume that any census is
        // preceded by a major garbage collection, which IND closures
        // cannot survive.  Therefore there is no point in considering IND
        // closures here: they will perish before the next census in any
        // case.
    case IND:
        // Found a dead closure: record its size
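        // LDV_recordDead() (see ProfHeap.c) credits the words of this
        // closure to the void or drag figures of the relevant censuses:
        // void if the closure was never used, drag otherwise.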
        LDV_recordDead(c, size);
        return size;

        /*
          Error case
        */
        // static objects
    case IND_STATIC:
    case CONSTR_STATIC:
    case FUN_STATIC:
    case THUNK_STATIC:
    case CONSTR_NOCAF_STATIC:
        // stack objects
    case UPDATE_FRAME:
    case CATCH_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
        // others
    case INVALID_OBJECT:
    default:
        barf("Invalid object in processHeapClosureForDead(): %d", info->type);
        return 0;
    }
}

/* --------------------------------------------------------------------------
 * Calls processHeapClosureForDead() on every *dead* closure in the
 * heap blocks starting at bd.
 * ----------------------------------------------------------------------- */
static void
processHeapForDead( bdescr *bd )
{
    StgPtr p;

    while (bd != NULL) {
        p = bd->start;
        while (p < bd->free) {
            p += processHeapClosureForDead((StgClosure *)p);
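            // A zero word can never be a valid info pointer, so zeroed
            // words here are slop (the profiling RTS zeroes slop precisely
            // so that the heap can be scanned linearly like this).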
            while (p < bd->free && !*p)   // skip slop
                p++;
        }
        ASSERT(p == bd->free);
        bd = bd->link;
    }
}

/* --------------------------------------------------------------------------
 * Calls processHeapClosureForDead() on every *dead* closure in the nursery.
 * ----------------------------------------------------------------------- */
static void
processNurseryForDead( void )
{
    StgPtr p;
    bdescr *bd;

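    // Note: only the nursery attached to MainCapability is examined here,
    // presumably on the assumption that LDV profiling runs with a single
    // capability.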
    for (bd = MainCapability.r.rNursery->blocks; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            while (p < bd->free && !*p) p++; // skip slop
            if (p >= bd->free) break;
            p += processHeapClosureForDead((StgClosure *)p);
        }
    }
}

/* --------------------------------------------------------------------------
 * Calls processHeapClosureForDead() on every *dead* closure in the closure
 * chain.
 * ----------------------------------------------------------------------- */
static void
processChainForDead( bdescr *bd )
{
    // Any object still in the chain is dead!
    while (bd != NULL) {
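        // Pinned blocks are skipped: a pinned block is bump-allocated and
        // may hold several objects (and slop), so bd->start is not
        // necessarily a single closure covering the block.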
        if (!(bd->flags & BF_PINNED)) {
            processHeapClosureForDead((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

/* --------------------------------------------------------------------------
 * Starts a census for *dead* closures, calling processHeapClosureForDead()
 * on every closure that died in the current garbage collection.  This
 * function is called from the garbage collector right before tidying up,
 * when all dead closures are still stored in the heap and easy to
 * identify.  Generations 0 through N have just been garbage collected.
 * ----------------------------------------------------------------------- */
void
LdvCensusForDead( nat N )
{
    nat g;

    // era == 0 means that LDV profiling is currently turned off.
    if (era == 0)
        return;

    if (RtsFlags.GcFlags.generations == 1) {
        //
        // Todo: support LDV for two-space garbage collection.
        //
        barf("Lag/Drag/Void profiling not supported with -G1");
    } else {
        processNurseryForDead();
        for (g = 0; g <= N; g++) {
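            // generations[g].old_blocks still holds the from-space of this
            // collection at this point: live closures there have been
            // replaced by forwarding pointers, so anything without one is
            // dead.  Likewise, large objects that survived have already
            // been moved off the large_objects list, so whatever remains
            // on it is dead.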
            processHeapForDead(generations[g].old_blocks);
            processChainForDead(generations[g].large_objects);
        }
    }
}
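
/* A minimal sketch of the intended call site (illustrative only: the actual
 * call is made from the garbage collector, and the exact guard used there
 * may differ):
 *
 *     #ifdef PROFILING
 *     // after collecting generations 0..N, just before tidying up:
 *     if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
 *         LdvCensusForDead(N);
 *     }
 *     #endif
 */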

/* --------------------------------------------------------------------------
 * Regard any closure in the current heap as dead or moribund and update
 * LDV statistics accordingly.
 * Called from shutdownHaskell() in RtsStartup.c.
 * ----------------------------------------------------------------------- */
void
LdvCensusKillAll( void )
{
    LdvCensusForDead(RtsFlags.GcFlags.generations - 1);
}

#endif /* PROFILING */