/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2003
 *
 * Support for heap profiling
 *
 * ---------------------------------------------------------------------------*/

#if defined(DEBUG) && !defined(PROFILING)
#define DEBUG_HEAP_PROF
#else
#undef DEBUG_HEAP_PROF
#endif

#if defined(PROFILING) || defined(DEBUG_HEAP_PROF)

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "Profiling.h"
#include "Storage.h"
#include "ProfHeap.h"
#include "Stats.h"
#include "Hash.h"
#include "RetainerProfile.h"
#include "LdvProfile.h"
#include "Arena.h"
#include "Printer.h"

#include <string.h>
#include <stdlib.h>
#include <math.h>

/* -----------------------------------------------------------------------------
 * era stores the current time period.  It is the same as the
 * number of censuses that have been performed.
 *
 * RESTRICTION:
 *   era must fit in LDV_SHIFT (15 or 30) bits.
 * Invariants:
 *   era is initialized to 1 in initHeapProfiling().
 *
 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
 * When era reaches max_era, the profiling stops because a closure can
 * store only up to (max_era - 1) as its creation or last use time.
 * -------------------------------------------------------------------------- */
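/* For example, with LDV_SHIFT = 15 (the 32-bit case), max_era is
 * 2^15 = 32768, so a profiled run can perform at most 32767 censuses.
 */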
unsigned int era;
static nat max_era;

/* -----------------------------------------------------------------------------
 * Counters
 *
 * For most heap profiles each closure identity gets a simple count
 * of live words in the heap at each census.  However, if we're
 * selecting by biography, then we have to keep the various
 * lag/drag/void counters for each identity.
 * -------------------------------------------------------------------------- */
typedef struct _counter {
    void *identity;
    union {
        nat resid;
        struct {
            int prim;       // total size of 'inherently used' closures
            int not_used;   // total size of 'never used' closures
            int used;       // total size of 'used at least once' closures
            int void_total; // current total size of 'destroyed without being used' closures
            int drag_total; // current total size of 'used at least once and waiting to die'
        } ldv;
    } c;
    struct _counter *next;
} counter;
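/* Illustration of how the union is used below: a plain residency census
 * touches only c.resid, e.g.
 *     ctr->c.resid += real_size;
 * whereas a biographical census (bioSelector != NULL) touches only the
 * c.ldv fields, e.g.
 *     ctr->c.ldv.not_used += real_size;
 * The two interpretations are never mixed within a single run.
 */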

STATIC_INLINE void
initLDVCtr( counter *ctr )
{
    ctr->c.ldv.prim = 0;
    ctr->c.ldv.not_used = 0;
    ctr->c.ldv.used = 0;
    ctr->c.ldv.void_total = 0;
    ctr->c.ldv.drag_total = 0;
}

typedef struct {
    double      time;  // the time in MUT time when the census is made
    HashTable * hash;
    counter   * ctrs;
    Arena     * arena;

    // for LDV profiling, when just displaying by LDV
    int       prim;
    int       not_used;
    int       used;
    int       void_total;
    int       drag_total;
} Census;

static Census *censuses = NULL;
static nat n_censuses = 0;

#ifdef PROFILING
static void aggregateCensusInfo( void );
#endif

static void dumpCensus( Census *census );

/* -----------------------------------------------------------------------------
   Closure Type Profiling;

   PROBABLY TOTALLY OUT OF DATE -- ToDo (SDM)
   -------------------------------------------------------------------------- */

#ifdef DEBUG_HEAP_PROF
static char *type_names[] = {
      "INVALID_OBJECT"
    , "CONSTR"
    , "CONSTR_STATIC"
    , "CONSTR_NOCAF_STATIC"

    , "FUN"
    , "FUN_STATIC"

    , "THUNK"
    , "THUNK_STATIC"
    , "THUNK_SELECTOR"

    , "BCO"
    , "AP_STACK"
    , "AP"

    , "PAP"

    , "IND"
    , "IND_OLDGEN"
    , "IND_PERM"
    , "IND_OLDGEN_PERM"
    , "IND_STATIC"

    , "RET_BCO"
    , "RET_SMALL"
    , "RET_VEC_SMALL"
    , "RET_BIG"
    , "RET_VEC_BIG"
    , "RET_DYN"
    , "UPDATE_FRAME"
    , "CATCH_FRAME"
    , "STOP_FRAME"

    , "BLACKHOLE"
    , "MVAR"

    , "ARR_WORDS"

    , "MUT_ARR_PTRS_CLEAN"
    , "MUT_ARR_PTRS_DIRTY"
    , "MUT_ARR_PTRS_FROZEN"
    , "MUT_VAR_CLEAN"
    , "MUT_VAR_DIRTY"

    , "WEAK"

    , "TSO"

    , "BLOCKED_FETCH"
    , "FETCH_ME"

    , "EVACUATED"
};

#endif /* DEBUG_HEAP_PROF */
/* -----------------------------------------------------------------------------
 * Find the "closure identity", which is a unique pointer representing
 * the band to which this closure's heap space is attributed in the
 * heap profile.
 * ------------------------------------------------------------------------- */
STATIC_INLINE void *
closureIdentity( StgClosure *p )
{
    switch (RtsFlags.ProfFlags.doHeapProfile) {

#ifdef PROFILING
    case HEAP_BY_CCS:
        return p->header.prof.ccs;
    case HEAP_BY_MOD:
        return p->header.prof.ccs->cc->module;
    case HEAP_BY_DESCR:
        return get_itbl(p)->prof.closure_desc;
    case HEAP_BY_TYPE:
        return get_itbl(p)->prof.closure_type;
    case HEAP_BY_RETAINER:
        // AFAIK, the only closures in the heap which might not have a
        // valid retainer set are DEAD_WEAK closures.
        if (isRetainerSetFieldValid(p))
            return retainerSetOf(p);
        else
            return NULL;

#else // DEBUG
    case HEAP_BY_INFOPTR:
        return (void *)((StgClosure *)p)->header.info;
    case HEAP_BY_CLOSURE_TYPE:
        return type_names[get_itbl(p)->type];

#endif
    default:
        barf("closureIdentity");
    }
}
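
/* For example, under HEAP_BY_TYPE every closure built from the same info
 * table returns the same closure_type pointer as its identity, so all
 * such closures accumulate in a single band of the profile. */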

/* --------------------------------------------------------------------------
 * Profiling type predicates
 * ----------------------------------------------------------------------- */
#ifdef PROFILING
STATIC_INLINE rtsBool
doingLDVProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
            || RtsFlags.ProfFlags.bioSelector != NULL);
}

STATIC_INLINE rtsBool
doingRetainerProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
            || RtsFlags.ProfFlags.retainerSelector != NULL);
}
#endif /* PROFILING */
// Processes a closure 'c' being destroyed whose size is 'size'.
// Make sure that LDV_recordDead() is not invoked on 'inherently used' closures
// such as TSO; they should not be involved in computing dragNew or voidNew.
//
// Even though era is checked in both LdvCensusForDead() and
// LdvCensusKillAll(), we still need to make sure that era is > 0 because
// LDV_recordDead() may be called from elsewhere in the runtime system.  E.g.,
// when a thunk is replaced by an indirection object.
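// Sketch of the LDV word inspected below (see LdvProfile.h for the exact
// masks; field widths depend on LDV_SHIFT):
//   - the state bits (LDV_STATE_MASK) say whether the closure has been
//     used since creation (LDV_STATE_CREATE vs LDV_STATE_USE);
//   - the creation era lives at (LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT;
//   - the era of last use lives at LDVW(c) & LDV_LAST_MASK.
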
#ifdef PROFILING
void
LDV_recordDead( StgClosure *c, nat size )
{
    void *id;
    nat t;
    counter *ctr;

    if (era > 0 && closureSatisfiesConstraints(c)) {
        size -= sizeofW(StgProfHeader);
        ASSERT(LDVW(c) != 0);
        if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
            t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
            if (t < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t].void_total   += (int)size;
                    censuses[era].void_total -= (int)size;
                    ASSERT(censuses[t].void_total < censuses[t].not_used);
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.void_total += (int)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.void_total -= (int)size;
                }
            }
        } else {
            t = LDVW((c)) & LDV_LAST_MASK;
            if (t + 1 < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t+1].drag_total += size;
                    censuses[era].drag_total -= size;
                } else {
                    void *id;
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.drag_total += (int)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.drag_total -= (int)size;
                }
            }
        }
    }
}
#endif

/* --------------------------------------------------------------------------
 * Initialize censuses[era].
 * ----------------------------------------------------------------------- */
STATIC_INLINE void
initEra(Census *census)
{
    census->hash  = allocHashTable();
    census->ctrs  = NULL;
    census->arena = newArena();

    census->not_used   = 0;
    census->used       = 0;
    census->prim       = 0;
    census->void_total = 0;
    census->drag_total = 0;
}

/* --------------------------------------------------------------------------
 * Increments era by 1 and initializes censuses[era].
 * Reallocates censuses[] and doubles its size if necessary.
 * ----------------------------------------------------------------------- */
static void
nextEra( void )
{
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era++;

        if (era == max_era) {
            errorBelch("maximum number of censuses reached; use +RTS -i to reduce");
            stg_exit(EXIT_FAILURE);
        }

        if (era == n_censuses) {
            n_censuses *= 2;
            censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
                                       "nextEra");
        }
    }
#endif /* PROFILING */

    initEra( &censuses[era] );
}

/* -----------------------------------------------------------------------------
 * DEBUG heap profiling, by info table
 * -------------------------------------------------------------------------- */

#ifdef DEBUG_HEAP_PROF
FILE *hp_file;
static char *hp_filename;

void initProfiling1( void )
{
}

void initProfiling2( void )
{
    if (RtsFlags.ProfFlags.doHeapProfile) {
        /* Initialise the log file name */
        hp_filename = stgMallocBytes(strlen(prog_name) + 6, "hpFileName");
        sprintf(hp_filename, "%s.hp", prog_name);

        /* open the log file */
        if ((hp_file = fopen(hp_filename, "w")) == NULL) {
            debugBelch("Can't open profiling report file %s\n",
                       hp_filename);
            RtsFlags.ProfFlags.doHeapProfile = 0;
            return;
        }
    }

    initHeapProfiling();
}

void endProfiling( void )
{
    endHeapProfiling();
}
#endif /* DEBUG_HEAP_PROF */

static void
printSample(rtsBool beginSample, StgDouble sampleValue)
{
    StgDouble fractionalPart, integralPart;
    fractionalPart = modf(sampleValue, &integralPart);
    fprintf(hp_file, "%s %" FMT_Word64 ".%02" FMT_Word64 "\n",
            (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
            (StgWord64)integralPart, (StgWord64)(fractionalPart * 100));
}
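
// For example, printSample(rtsTrue, 1.5) emits "BEGIN_SAMPLE 1.50" and
// printSample(rtsFalse, 1.5) emits the matching "END_SAMPLE 1.50".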

/* --------------------------------------------------------------------------
 * Initialize the heap profiler
 * ----------------------------------------------------------------------- */
nat
initHeapProfiling(void)
{
    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return 0;
    }

#ifdef PROFILING
    if (doingLDVProfiling() && doingRetainerProfiling()) {
        errorBelch("cannot mix -hb and -hr");
        stg_exit(EXIT_FAILURE);
    }
#endif

    // we only count eras if we're doing LDV profiling.  Otherwise era
    // is fixed at zero.
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era = 1;
    } else
#endif
    {
        era = 0;
    }

    {   // max_era = 2^LDV_SHIFT
        nat p;
        max_era = 1;
        for (p = 0; p < LDV_SHIFT; p++)
            max_era *= 2;
    }

    n_censuses = 32;
    censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");

    initEra( &censuses[era] );

    /* initProfilingLogFile(); */
    fprintf(hp_file, "JOB \"%s", prog_name);

#ifdef PROFILING
    {
        int count;
        for (count = 1; count < prog_argc; count++)
            fprintf(hp_file, " %s", prog_argv[count]);
        fprintf(hp_file, " +RTS");
        for (count = 0; count < rts_argc; count++)
            fprintf(hp_file, " %s", rts_argv[count]);
    }
#endif /* PROFILING */

    fprintf(hp_file, "\"\n" );

    fprintf(hp_file, "DATE \"%s\"\n", time_str());

    fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
    fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");

    printSample(rtsTrue, 0);
    printSample(rtsFalse, 0);
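
    // The preamble written above looks like this in the .hp file
    // (program name and arguments are illustrative):
    //
    //   JOB "prog arg1 +RTS -hc"
    //   DATE "Fri Jan  5 12:00 2007"
    //   SAMPLE_UNIT "seconds"
    //   VALUE_UNIT "bytes"
    //   BEGIN_SAMPLE 0.00
    //   END_SAMPLE 0.00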

#ifdef DEBUG_HEAP_PROF
    DEBUG_LoadSymbols(prog_name);
#endif

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        initRetainerProfiling();
    }
#endif

    return 0;
}

void
endHeapProfiling(void)
{
    StgDouble seconds;

    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return;
    }

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        endRetainerProfiling();
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        LdvCensusKillAll();
        aggregateCensusInfo();
        for (t = 1; t < era; t++) {
            dumpCensus( &censuses[t] );
        }
    }
#endif

    seconds = mut_user_time();
    printSample(rtsTrue, seconds);
    printSample(rtsFalse, seconds);
    fclose(hp_file);
}

#ifdef PROFILING
static size_t
buf_append(char *p, const char *q, char *end)
{
    size_t m;

    for (m = 0; p < end; p++, q++, m++) {
        *p = *q;
        if (*q == '\0') { break; }
    }
    return m;
}
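
// For example, buf_append(p, "CAF", end) copies at most end-p characters
// and returns the number copied, excluding the terminating NUL.  Callers
// advance p by the return value, so the next append overwrites the NUL
// with the next fragment.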

static void
fprint_ccs(FILE *fp, CostCentreStack *ccs, nat max_length)
{
    char buf[max_length+1], *p, *buf_end;

    // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
    if (ccs == CCS_MAIN) {
        fprintf(fp, "MAIN");
        return;
    }

    fprintf(fp, "(%ld)", ccs->ccsID);

    p = buf;
    buf_end = buf + max_length + 1;

    // keep printing components of the stack until we run out of space
    // in the buffer.  If we run out of space, end with "...".
    for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {

        // CAF cost centres print as M.CAF, but we leave the module
        // name out of all the others to save space.
        if (!strcmp(ccs->cc->label,"CAF")) {
            p += buf_append(p, ccs->cc->module, buf_end);
            p += buf_append(p, ".CAF", buf_end);
        } else {
            p += buf_append(p, ccs->cc->label, buf_end);
            if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
                p += buf_append(p, "/", buf_end);
            }
        }

        if (p >= buf_end) {
            sprintf(buf+max_length-4, "...");
            break;
        }
    }
    fprintf(fp, "%s", buf);
}
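
// Example output for a stack with f at the bottom and h on top (the
// ccsID is illustrative):
//
//   (42)h/g/f
//
// Components are printed from the top of the stack outwards, separated
// by '/', and the output is truncated with "..." once max_length
// characters have been used.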
#endif /* PROFILING */

rtsBool
strMatchesSelector( char* str, char* sel )
{
    char* p;
    // debugBelch("str_matches_selector %s %s\n", str, sel);
    while (1) {
        // Compare str against wherever we've got to in sel.
        p = str;
        while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
            p++; sel++;
        }
        // Match if all of str used and have reached the end of a sel fragment.
        if (*p == '\0' && (*sel == ',' || *sel == '\0'))
            return rtsTrue;

        // No match.  Advance sel to the start of the next elem.
        while (*sel != ',' && *sel != '\0') sel++;
        if (*sel == ',') sel++;

        /* Run out of sel ?? */
        if (*sel == '\0') return rtsFalse;
    }
}
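
// Examples:
//   strMatchesSelector("drag", "lag,drag,void")  ==>  rtsTrue
//   strMatchesSelector("use",  "lag,drag,void")  ==>  rtsFalse
//   strMatchesSelector("lag",  "lagging")        ==>  rtsFalse
// (a whole comma-separated fragment must match, not just a prefix)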

/* -----------------------------------------------------------------------------
 * Figure out whether a closure should be counted in this census, by
 * testing against all the specified constraints.
 * -------------------------------------------------------------------------- */
rtsBool
closureSatisfiesConstraints( StgClosure* p )
{
#ifdef DEBUG_HEAP_PROF
    (void)p;   /* keep gcc -Wall happy */
    return rtsTrue;
#else
    rtsBool b;

    // The CCS has a selected field to indicate whether this closure is
    // deselected by not being mentioned in the module, CC, or CCS
    // selectors.
    if (!p->header.prof.ccs->selected) {
        return rtsFalse;
    }

    if (RtsFlags.ProfFlags.descrSelector) {
        b = strMatchesSelector( (get_itbl((StgClosure *)p))->prof.closure_desc,
                                RtsFlags.ProfFlags.descrSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.typeSelector) {
        b = strMatchesSelector( (get_itbl((StgClosure *)p))->prof.closure_type,
                                RtsFlags.ProfFlags.typeSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.retainerSelector) {
        RetainerSet *rs;
        nat i;
        // We must check that the retainer set is valid here.  One
        // reason it might not be valid is if this closure is a
        // newly deceased weak pointer (i.e. a DEAD_WEAK), since
        // these aren't reached by the retainer profiler's traversal.
        if (isRetainerSetFieldValid((StgClosure *)p)) {
            rs = retainerSetOf((StgClosure *)p);
            if (rs != NULL) {
                for (i = 0; i < rs->num; i++) {
                    b = strMatchesSelector( rs->element[i]->cc->label,
                                            RtsFlags.ProfFlags.retainerSelector );
                    if (b) return rtsTrue;
                }
            }
        }
        return rtsFalse;
    }
    return rtsTrue;
#endif /* DEBUG_HEAP_PROF */
}

/* -----------------------------------------------------------------------------
 * Aggregate the heap census info for biographical profiling
 * -------------------------------------------------------------------------- */
#ifdef PROFILING
static void
aggregateCensusInfo( void )
{
    HashTable *acc;
    nat t;
    counter *c, *d, *ctrs;
    Arena *arena;

    if (!doingLDVProfiling()) return;

    // Aggregate the LDV counters when displaying by biography.
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        int void_total, drag_total;

        // Now we compute void_total and drag_total for each census.
        // After the program has finished, the void_total field of
        // each census contains the count of words that were *created*
        // in this era and were eventually void.  Conversely, if a
        // void closure was destroyed in this era, it will be
        // represented by a negative count of words in void_total.
        //
        // To get the count of live words that are void at each
        // census, just propagate the void_total count forwards:
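        //
        // For example, if 100 words are created in era 1 and later die
        // without being used while era 3 is current, LDV_recordDead()
        // leaves censuses[1].void_total == +100 and
        // censuses[3].void_total == -100.  The running sum below then
        // reports 100 void words live at censuses 1 and 2, and none
        // from census 3 onwards.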

        void_total = 0;
        drag_total = 0;
        for (t = 1; t < era; t++) { // note: start at 1, not 0
            void_total += censuses[t].void_total;
            drag_total += censuses[t].drag_total;
            censuses[t].void_total = void_total;
            censuses[t].drag_total = drag_total;

            ASSERT( censuses[t].void_total <= censuses[t].not_used );
            // should be true because: void_total is the count of
            // live words that are void at this census, which *must*
            // be less than the number of live words that have not
            // been used yet.

            ASSERT( censuses[t].drag_total <= censuses[t].used );
            // similar reasoning as above.
        }

        return;
    }

    // otherwise... we're doing a heap profile that is restricted to
    // some combination of lag, drag, void or use.  We've kept all the
    // census info for all censuses so far, but we still need to
    // aggregate the counters forwards.

    arena = newArena();
    acc = allocHashTable();
    ctrs = NULL;

    for (t = 1; t < era; t++) {

        // first look through all the counters we're aggregating
        for (c = ctrs; c != NULL; c = c->next) {
            // if one of the totals is non-zero, then this closure
            // type must be present in the heap at this census time...
            d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);

            if (d == NULL) {
                // if this closure identity isn't present in the
                // census for this time period, then our running
                // totals *must* be zero.
                ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);

                // debugCCS(c->identity);
                // debugBelch(" census=%d void_total=%d drag_total=%d\n",
                //            t, c->c.ldv.void_total, c->c.ldv.drag_total);
            } else {
                d->c.ldv.void_total += c->c.ldv.void_total;
                d->c.ldv.drag_total += c->c.ldv.drag_total;
                c->c.ldv.void_total = d->c.ldv.void_total;
                c->c.ldv.drag_total = d->c.ldv.drag_total;

                ASSERT( c->c.ldv.void_total >= 0 );
                ASSERT( c->c.ldv.drag_total >= 0 );
            }
        }

        // now look through the counters in this census to find new ones
        for (c = censuses[t].ctrs; c != NULL; c = c->next) {
            d = lookupHashTable(acc, (StgWord)c->identity);
            if (d == NULL) {
                d = arenaAlloc( arena, sizeof(counter) );
                initLDVCtr(d);
                insertHashTable( acc, (StgWord)c->identity, d );
                d->identity = c->identity;
                d->next = ctrs;
                ctrs = d;
                d->c.ldv.void_total = c->c.ldv.void_total;
                d->c.ldv.drag_total = c->c.ldv.drag_total;
            }
            ASSERT( c->c.ldv.void_total >= 0 );
            ASSERT( c->c.ldv.drag_total >= 0 );
        }
    }

    freeHashTable(acc, NULL);
    arenaFree(arena);
}
#endif

/* -----------------------------------------------------------------------------
 * Print out the results of a heap census.
 * -------------------------------------------------------------------------- */
static void
dumpCensus( Census *census )
{
    counter *ctr;
    int count;

    printSample(rtsTrue, census->time);

#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        fprintf(hp_file, "VOID\t%lu\n",
                (unsigned long)(census->void_total) * sizeof(W_));
        fprintf(hp_file, "LAG\t%lu\n",
                (unsigned long)(census->not_used - census->void_total) * sizeof(W_));
        fprintf(hp_file, "USE\t%lu\n",
                (unsigned long)(census->used - census->drag_total) * sizeof(W_));
        fprintf(hp_file, "INHERENT_USE\t%lu\n",
                (unsigned long)(census->prim) * sizeof(W_));
        fprintf(hp_file, "DRAG\t%lu\n",
                (unsigned long)(census->drag_total) * sizeof(W_));
        printSample(rtsFalse, census->time);
        return;
    }
#endif

    for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {

#ifdef PROFILING
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            count = 0;
            if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
            if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.drag_total;
            if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.void_total;
            if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
        } else
#endif
        {
            count = ctr->c.resid;
        }

        ASSERT( count >= 0 );

        if (count == 0) continue;

#ifdef DEBUG_HEAP_PROF
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_INFOPTR:
            fprintf(hp_file, "%s", lookupGHCName(ctr->identity));
            break;
        case HEAP_BY_CLOSURE_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        }
#endif

#ifdef PROFILING
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CCS:
            fprint_ccs(hp_file, (CostCentreStack *)ctr->identity,
                       RtsFlags.ProfFlags.ccsLength);
            break;
        case HEAP_BY_MOD:
        case HEAP_BY_DESCR:
        case HEAP_BY_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        case HEAP_BY_RETAINER:
        {
            RetainerSet *rs = (RetainerSet *)ctr->identity;

            // it might be the distinguished retainer set rs_MANY:
            if (rs == &rs_MANY) {
                fprintf(hp_file, "MANY");
                break;
            }

            // Mark this retainer set by negating its id, because it
            // has appeared in at least one census.  We print the
            // values of all such retainer sets into the log file at
            // the end.  A retainer set may exist but not feature in
            // any censuses if it arose as the intermediate retainer
            // set for some closure during retainer set calculation.
            if (rs->id > 0)
                rs->id = -(rs->id);

            // report in the unit of bytes: * sizeof(StgWord)
            printRetainerSetShort(hp_file, rs);
            break;
        }
        default:
            barf("dumpCensus; doHeapProfile");
        }
#endif

        fprintf(hp_file, "\t%lu\n", (unsigned long)count * sizeof(W_));
    }

    printSample(rtsFalse, census->time);
}
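
/* A dumped census for a cost-centre-stack profile looks like this in the
 * .hp file (band names and sizes are illustrative; the separator before
 * each size is a tab):
 *
 *   BEGIN_SAMPLE 0.57
 *   (42)h/g/f    1600
 *   MAIN         2048
 *   END_SAMPLE 0.57
 */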

/* -----------------------------------------------------------------------------
 * Code to perform a heap census.
 * -------------------------------------------------------------------------- */
static void
heapCensusChain( Census *census, bdescr *bd )
{
    StgPtr p;
    StgInfoTable *info;
    void *identity;
    nat size;
    counter *ctr;
    nat real_size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        // HACK: ignore pinned blocks, because they contain gaps.
        // It's not clear exactly what we'd like to do here, since we
        // can't tell which objects in the block are actually alive.
        // Perhaps the whole block should be counted as SYSTEM memory.
        if (bd->flags & BF_PINNED) {
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case IND_PERM:
            case IND_OLDGEN:
            case IND_OLDGEN_PERM:
            case CAF_BLACKHOLE:
            case SE_CAF_BLACKHOLE:
            case SE_BLACKHOLE:
            case BLACKHOLE:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR:
            case WEAK:
            case STABLE_NAME:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
                size = ap_stack_sizeW((StgAP_STACK *)p);
                break;

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW(stgCast(StgArrWords*,p));
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
#ifdef DEBUG_HEAP_PROF
                size = tso_sizeW((StgTSO *)p);
                break;
#else
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = tso_sizeW((StgTSO *)p);
                    break;
                } else {
                    // Skip this TSO and move on to the next object
                    p += tso_sizeW((StgTSO *)p);
                    continue;
                }
#endif

            case TREC_HEADER:
                prim = rtsTrue;
                size = sizeofW(StgTRecHeader);
                break;

            case TVAR_WATCH_QUEUE:
                prim = rtsTrue;
                size = sizeofW(StgTVarWatchQueue);
                break;

            case INVARIANT_CHECK_QUEUE:
                prim = rtsTrue;
                size = sizeofW(StgInvariantCheckQueue);
                break;

            case ATOMIC_INVARIANT:
                prim = rtsTrue;
                size = sizeofW(StgAtomicInvariant);
                break;

            case TVAR:
                prim = rtsTrue;
                size = sizeofW(StgTVar);
                break;

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("heapCensus, unknown object: %d", info->type);
            }

            identity = NULL;

#ifdef DEBUG_HEAP_PROF
            real_size = size;
#else
            // subtract the profiling overhead
            real_size = size - sizeofW(StgProfHeader);
#endif

            if (closureSatisfiesConstraints((StgClosure*)p)) {
#ifdef PROFILING
                if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
                    if (prim)
                        census->prim += real_size;
                    else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                        census->not_used += real_size;
                    else
                        census->used += real_size;
                } else
#endif
                {
                    identity = closureIdentity((StgClosure *)p);

                    if (identity != NULL) {
                        ctr = lookupHashTable( census->hash, (StgWord)identity );
                        if (ctr != NULL) {
#ifdef PROFILING
                            if (RtsFlags.ProfFlags.bioSelector != NULL) {
                                if (prim)
                                    ctr->c.ldv.prim += real_size;
                                else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                                    ctr->c.ldv.not_used += real_size;
                                else
                                    ctr->c.ldv.used += real_size;
                            } else
#endif
                            {
                                ctr->c.resid += real_size;
                            }
                        } else {
                            ctr = arenaAlloc( census->arena, sizeof(counter) );
                            initLDVCtr(ctr);
                            insertHashTable( census->hash, (StgWord)identity, ctr );
                            ctr->identity = identity;
                            ctr->next = census->ctrs;
                            census->ctrs = ctr;

#ifdef PROFILING
                            if (RtsFlags.ProfFlags.bioSelector != NULL) {
                                if (prim)
                                    ctr->c.ldv.prim = real_size;
                                else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                                    ctr->c.ldv.not_used = real_size;
                                else
                                    ctr->c.ldv.used = real_size;
                            } else
#endif
                            {
                                ctr->c.resid = real_size;
                            }
                        }
                    }
                }
            }

            p += size;
        }
    }
}
void
heapCensus( void )
{
    nat g, s;
    Census *census;

    census = &censuses[era];
    census->time = mut_user_time();

    // calculate retainer sets if necessary
#ifdef PROFILING
    if (doingRetainerProfiling()) {
        retainerProfile();
    }
#endif

#ifdef PROFILING
    stat_startHeapCensus();
#endif

    // Traverse the heap, collecting the census info

    // First the small_alloc_list: we have to fix the free pointer at
    // the end by calling tidyAllocateLists() first.
    tidyAllocateLists();
    heapCensusChain( census, small_alloc_list );

    // Now traverse the heap in each generation/step.
    if (RtsFlags.GcFlags.generations == 1) {
        heapCensusChain( census, g0s0->blocks );
    } else {
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            for (s = 0; s < generations[g].n_steps; s++) {
                heapCensusChain( census, generations[g].steps[s].blocks );
                // Are we interested in large objects?  might be
                // confusing to include the stack in a heap profile.
                heapCensusChain( census, generations[g].steps[s].large_objects );
            }
        }
    }

    // dump out the census info
#ifdef PROFILING
    // We can't generate any info for LDV profiling until
    // the end of the run...
    if (!doingLDVProfiling())
        dumpCensus( census );
#else
    dumpCensus( census );
#endif

    // free our storage, unless we're keeping all the census info for
    // future restriction by biography.
#ifdef PROFILING
    if (RtsFlags.ProfFlags.bioSelector == NULL)
#endif
    {
        freeHashTable( census->hash, NULL/* don't free the elements */ );
        arenaFree( census->arena );
        census->hash = NULL;
        census->arena = NULL;
    }

    // we're into the next time period now
    nextEra();

#ifdef PROFILING
    stat_endHeapCensus();
#endif
}

#endif /* PROFILING || DEBUG_HEAP_PROF */