Free more things that we allocate
rts/ProfHeap.c
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2003
 *
 * Support for heap profiling
 *
 * ---------------------------------------------------------------------------*/

#if defined(DEBUG) && !defined(PROFILING)
#define DEBUG_HEAP_PROF
#else
#undef DEBUG_HEAP_PROF
#endif

#if defined(PROFILING) || defined(DEBUG_HEAP_PROF)

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "Profiling.h"
#include "ProfHeap.h"
#include "Stats.h"
#include "Hash.h"
#include "RetainerProfile.h"
#include "LdvProfile.h"
#include "Arena.h"
#include "Printer.h"

#include <string.h>
#include <stdlib.h>
#include <math.h>

/* -----------------------------------------------------------------------------
 * era stores the current time period.  It is the same as the
 * number of censuses that have been performed.
 *
 * RESTRICTION:
 *   era must be no longer than LDV_SHIFT (15 or 30) bits.
 * Invariants:
 *   era is initialized to 1 in initHeapProfiling().
 *
 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
 * When era reaches max_era, the profiling stops because a closure can
 * store only up to (max_era - 1) as its creation or last use time.
 * -------------------------------------------------------------------------- */
unsigned int era;
static nat max_era;
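
/* Illustrative sketch (not compiled into the RTS): the restriction and
 * invariants above, written as a hypothetical sanity check.  It assumes
 * only the LDV_SHIFT macro and ASSERT used elsewhere in this file, and
 * the era bounds only hold while LDV profiling is active (era starts
 * at 1 then).
 */
#if 0
static void exampleEraInvariant( void )
{
    ASSERT( max_era == (1U << LDV_SHIFT) ); // max_era = 2^LDV_SHIFT
    ASSERT( era > 0 && era < max_era );     // era fits in LDV_SHIFT bits
}
#endif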

/* -----------------------------------------------------------------------------
 * Counters
 *
 * For most heap profiles each closure identity gets a simple count
 * of live words in the heap at each census.  However, if we're
 * selecting by biography, then we have to keep the various
 * lag/drag/void counters for each identity.
 * -------------------------------------------------------------------------- */
typedef struct _counter {
    void *identity;
    union {
        nat resid;
        struct {
            int prim;       // total size of 'inherently used' closures
            int not_used;   // total size of 'never used' closures
            int used;       // total size of 'used at least once' closures
            int void_total; // current total size of 'destroyed without being used' closures
            int drag_total; // current total size of 'used at least once and waiting to die'
        } ldv;
    } c;
    struct _counter *next;
} counter;

STATIC_INLINE void
initLDVCtr( counter *ctr )
{
    ctr->c.ldv.prim = 0;
    ctr->c.ldv.not_used = 0;
    ctr->c.ldv.used = 0;
    ctr->c.ldv.void_total = 0;
    ctr->c.ldv.drag_total = 0;
}

typedef struct {
    double    time;  // the time in MUT time when the census is made
    HashTable *hash;
    counter   *ctrs;
    Arena     *arena;

    // for LDV profiling, when just displaying by LDV
    int prim;
    int not_used;
    int used;
    int void_total;
    int drag_total;
} Census;
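
/* Design note: each census keeps its counters in an arena, which is
 * freed wholesale when the census is discarded.  The counters are
 * indexed two ways: a hash table keyed on closure identity for O(1)
 * lookup while the census is being taken, and a linked list threaded
 * through the 'next' fields for cheap iteration when dumping.
 */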
static Census *censuses = NULL;
static nat n_censuses = 0;

#ifdef PROFILING
static void aggregateCensusInfo( void );
#endif

static void dumpCensus( Census *census );

/* -----------------------------------------------------------------------------
   Closure Type Profiling

   PROBABLY TOTALLY OUT OF DATE -- ToDo (SDM)
   -------------------------------------------------------------------------- */

#ifdef DEBUG_HEAP_PROF
static char *type_names[] = {
    "INVALID_OBJECT"
  , "CONSTR"
  , "CONSTR_STATIC"
  , "CONSTR_NOCAF_STATIC"

  , "FUN"
  , "FUN_STATIC"

  , "THUNK"
  , "THUNK_STATIC"
  , "THUNK_SELECTOR"

  , "BCO"
  , "AP_STACK"
  , "AP"

  , "PAP"

  , "IND"
  , "IND_OLDGEN"
  , "IND_PERM"
  , "IND_OLDGEN_PERM"
  , "IND_STATIC"

  , "RET_BCO"
  , "RET_SMALL"
  , "RET_VEC_SMALL"
  , "RET_BIG"
  , "RET_VEC_BIG"
  , "RET_DYN"
  , "UPDATE_FRAME"
  , "CATCH_FRAME"
  , "STOP_FRAME"

  , "BLACKHOLE"
  , "MVAR"

  , "ARR_WORDS"

  , "MUT_ARR_PTRS_CLEAN"
  , "MUT_ARR_PTRS_DIRTY"
  , "MUT_ARR_PTRS_FROZEN"
  , "MUT_VAR_CLEAN"
  , "MUT_VAR_DIRTY"

  , "WEAK"

  , "TSO"

  , "BLOCKED_FETCH"
  , "FETCH_ME"

  , "EVACUATED"
};

#endif /* DEBUG_HEAP_PROF */

/* -----------------------------------------------------------------------------
 * Find the "closure identity", which is a unique pointer representing
 * the band to which this closure's heap space is attributed in the
 * heap profile.
 * ------------------------------------------------------------------------- */
STATIC_INLINE void *
closureIdentity( StgClosure *p )
{
    switch (RtsFlags.ProfFlags.doHeapProfile) {

#ifdef PROFILING
    case HEAP_BY_CCS:
        return p->header.prof.ccs;
    case HEAP_BY_MOD:
        return p->header.prof.ccs->cc->module;
    case HEAP_BY_DESCR:
        return get_itbl(p)->prof.closure_desc;
    case HEAP_BY_TYPE:
        return get_itbl(p)->prof.closure_type;
    case HEAP_BY_RETAINER:
        // AFAIK, the only closures in the heap which might not have a
        // valid retainer set are DEAD_WEAK closures.
        if (isRetainerSetFieldValid(p))
            return retainerSetOf(p);
        else
            return NULL;

#else /* DEBUG_HEAP_PROF */
    case HEAP_BY_INFOPTR:
        return (void *)((StgClosure *)p)->header.info;
    case HEAP_BY_CLOSURE_TYPE:
        return type_names[get_itbl(p)->type];

#endif
    default:
        barf("closureIdentity");
    }
}

/* --------------------------------------------------------------------------
 * Profiling type predicates
 * ----------------------------------------------------------------------- */
#ifdef PROFILING
STATIC_INLINE rtsBool
doingLDVProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
            || RtsFlags.ProfFlags.bioSelector != NULL);
}

STATIC_INLINE rtsBool
doingRetainerProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
            || RtsFlags.ProfFlags.retainerSelector != NULL);
}
#endif /* PROFILING */

// Processes a closure 'c' being destroyed whose size is 'size'.
// Make sure that LDV_recordDead() is not invoked on 'inherently used' closures
// such as TSO; they should not be involved in computing dragNew or voidNew.
//
// Even though era is checked in both LdvCensusForDead() and
// LdvCensusKillAll(), we still need to make sure that era is > 0 because
// LDV_recordDead() may be called from elsewhere in the runtime system.  E.g.,
// when a thunk is replaced by an indirection object.

#ifdef PROFILING
void
LDV_recordDead( StgClosure *c, nat size )
{
    void *id;
    nat t;
    counter *ctr;

    if (era > 0 && closureSatisfiesConstraints(c)) {
        size -= sizeofW(StgProfHeader);
        ASSERT(LDVW(c) != 0);
        if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
            t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
            if (t < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t].void_total   += (int)size;
                    censuses[era].void_total -= (int)size;
                    ASSERT(censuses[t].void_total < censuses[t].not_used);
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.void_total += (int)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.void_total -= (int)size;
                }
            }
        } else {
            t = LDVW((c)) & LDV_LAST_MASK;
            if (t + 1 < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t+1].drag_total += size;
                    censuses[era].drag_total -= size;
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.drag_total += (int)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.drag_total -= (int)size;
                }
            }
        }
    }
}
#endif
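
/* Illustrative sketch (not compiled into the RTS): how the LDV word
 * consulted above encodes a closure's biography.  The state bits say
 * whether the closure has been used since creation; the era of creation
 * sits above LDV_SHIFT, and the era of last use sits in the low bits.
 * This helper is hypothetical and assumes only the LDV macros already
 * used in LDV_recordDead().
 */
#if 0
static nat exampleLDVEra( StgClosure *c )
{
    StgWord w = LDVW(c);
    if ((w & LDV_STATE_MASK) == LDV_STATE_CREATE) {
        // never used: recover the era in which the closure was created
        return (w & LDV_CREATE_MASK) >> LDV_SHIFT;
    } else {
        // used at least once: recover the era of the last use
        return w & LDV_LAST_MASK;
    }
}
#endif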

/* --------------------------------------------------------------------------
 * Initialize censuses[era];
 * ----------------------------------------------------------------------- */
STATIC_INLINE void
initEra(Census *census)
{
    census->hash  = allocHashTable();
    census->ctrs  = NULL;
    census->arena = newArena();

    census->not_used   = 0;
    census->used       = 0;
    census->prim       = 0;
    census->void_total = 0;
    census->drag_total = 0;
}

STATIC_INLINE void
freeEra(Census *census)
{
    arenaFree(census->arena);
    freeHashTable(census->hash, NULL);
}

/* --------------------------------------------------------------------------
 * Increases era by 1 and initializes censuses[era].
 * Reallocates censuses[] and doubles its size if necessary.
 * ----------------------------------------------------------------------- */
static void
nextEra( void )
{
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era++;

        if (era == max_era) {
            errorBelch("maximum number of censuses reached; use +RTS -i to reduce");
            stg_exit(EXIT_FAILURE);
        }

        if (era == n_censuses) {
            n_censuses *= 2;
            censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
                                       "nextEra");
        }
    }
#endif /* PROFILING */

    initEra( &censuses[era] );
}

/* -----------------------------------------------------------------------------
 * DEBUG heap profiling, by info table
 * -------------------------------------------------------------------------- */

#ifdef DEBUG_HEAP_PROF
FILE *hp_file;
static char *hp_filename;

void initProfiling1( void )
{
}

void freeProfiling1( void )
{
}

void initProfiling2( void )
{
    if (RtsFlags.ProfFlags.doHeapProfile) {
        /* Initialise the log file name */
        hp_filename = stgMallocBytes(strlen(prog_name) + 6, "hpFileName");
        sprintf(hp_filename, "%s.hp", prog_name);

        /* open the log file */
        if ((hp_file = fopen(hp_filename, "w")) == NULL) {
            debugBelch("Can't open profiling report file %s\n",
                       hp_filename);
            RtsFlags.ProfFlags.doHeapProfile = 0;
            return;
        }
    }

    initHeapProfiling();
}

void endProfiling( void )
{
    endHeapProfiling();
}
#endif /* DEBUG_HEAP_PROF */

static void
printSample(rtsBool beginSample, StgDouble sampleValue)
{
    StgDouble fractionalPart, integralPart;
    fractionalPart = modf(sampleValue, &integralPart);
    fprintf(hp_file, "%s %" FMT_Word64 ".%02" FMT_Word64 "\n",
            (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
            (StgWord64)integralPart, (StgWord64)(fractionalPart * 100));
}
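
/* For reference, the .hp output produced by this module looks roughly
 * like the sketch below (band names and numbers invented for
 * illustration; each band line is the identity, a tab, then bytes):
 *
 *   JOB "a.out +RTS -hc"
 *   DATE "Fri Jan  1 00:00 2021"
 *   SAMPLE_UNIT "seconds"
 *   VALUE_UNIT "bytes"
 *   BEGIN_SAMPLE 0.00
 *   END_SAMPLE 0.00
 *   BEGIN_SAMPLE 0.57
 *   (123)f/g	1200
 *   MAIN	4040
 *   END_SAMPLE 0.57
 */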

/* --------------------------------------------------------------------------
 * Initialize the heap profiler
 * ----------------------------------------------------------------------- */
nat
initHeapProfiling(void)
{
    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return 0;
    }

#ifdef PROFILING
    if (doingLDVProfiling() && doingRetainerProfiling()) {
        errorBelch("cannot mix -hb and -hr");
        stg_exit(EXIT_FAILURE);
    }
#endif

    // we only count eras if we're doing LDV profiling.  Otherwise era
    // is fixed at zero.
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era = 1;
    } else
#endif
    {
        era = 0;
    }

    {   // max_era = 2^LDV_SHIFT
        nat p;
        max_era = 1;
        for (p = 0; p < LDV_SHIFT; p++)
            max_era *= 2;
    }

    n_censuses = 32;
    censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");

    initEra( &censuses[era] );

    /* initProfilingLogFile(); */
    fprintf(hp_file, "JOB \"%s", prog_name);

#ifdef PROFILING
    {
        int count;
        for(count = 1; count < prog_argc; count++)
            fprintf(hp_file, " %s", prog_argv[count]);
        fprintf(hp_file, " +RTS");
        for(count = 0; count < rts_argc; count++)
            fprintf(hp_file, " %s", rts_argv[count]);
    }
#endif /* PROFILING */

    fprintf(hp_file, "\"\n" );

    fprintf(hp_file, "DATE \"%s\"\n", time_str());

    fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
    fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");

    printSample(rtsTrue, 0);
    printSample(rtsFalse, 0);

#ifdef DEBUG_HEAP_PROF
    DEBUG_LoadSymbols(prog_name);
#endif

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        initRetainerProfiling();
    }
#endif

    return 0;
}

void
endHeapProfiling(void)
{
    StgDouble seconds;

    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return;
    }

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        endRetainerProfiling();
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        LdvCensusKillAll();
        aggregateCensusInfo();
        for (t = 1; t < era; t++) {
            dumpCensus( &censuses[t] );
        }
    }
#endif

    {
        nat t;
        for (t = 0; t <= era; t++) {
            freeEra( &censuses[t] );
        }
    }
    stgFree(censuses);

    seconds = mut_user_time();
    printSample(rtsTrue, seconds);
    printSample(rtsFalse, seconds);
    fclose(hp_file);
}



#ifdef PROFILING
static size_t
buf_append(char *p, const char *q, char *end)
{
    int m;

    for (m = 0; p < end; p++, q++, m++) {
        *p = *q;
        if (*q == '\0') { break; }
    }
    return m;
}

static void
fprint_ccs(FILE *fp, CostCentreStack *ccs, nat max_length)
{
    char buf[max_length+1], *p, *buf_end;

    // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
    if (ccs == CCS_MAIN) {
        fprintf(fp, "MAIN");
        return;
    }

    fprintf(fp, "(%ld)", ccs->ccsID);

    p = buf;
    buf_end = buf + max_length + 1;

    // keep printing components of the stack until we run out of space
    // in the buffer.  If we run out of space, end with "...".
    for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {

        // CAF cost centres print as M.CAF, but we leave the module
        // name out of all the others to save space.
        if (!strcmp(ccs->cc->label,"CAF")) {
            p += buf_append(p, ccs->cc->module, buf_end);
            p += buf_append(p, ".CAF", buf_end);
        } else {
            p += buf_append(p, ccs->cc->label, buf_end);
            if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
                p += buf_append(p, "/", buf_end);
            }
        }

        if (p >= buf_end) {
            sprintf(buf+max_length-4, "...");
            break;
        }
    }
    fprintf(fp, "%s", buf);
}
#endif /* PROFILING */
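
/* For example, a stack with cost centre 'h' called from 'g' called from
 * 'f' below MAIN would print as "(42)h/g/f" (the ccsID in parentheses,
 * then the stack innermost-first), and a CAF entry as "(7)Main.CAF".
 * The ids and names here are invented for illustration; over-long
 * stacks end in "...".
 */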

rtsBool
strMatchesSelector( char* str, char* sel )
{
    char* p;
    // debugBelch("str_matches_selector %s %s\n", str, sel);
    while (1) {
        // Compare str against wherever we've got to in sel.
        p = str;
        while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
            p++; sel++;
        }
        // Match if all of str used and have reached the end of a sel fragment.
        if (*p == '\0' && (*sel == ',' || *sel == '\0'))
            return rtsTrue;

        // No match.  Advance sel to the start of the next elem.
        while (*sel != ',' && *sel != '\0') sel++;
        if (*sel == ',') sel++;

        /* Run out of sel ?? */
        if (*sel == '\0') return rtsFalse;
    }
}
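
/* Illustrative sketch (not compiled into the RTS): the selector is a
 * comma-separated list of exact strings, as supplied with e.g. the
 * biography selector +RTS -hb{...}.  Hypothetical checks:
 */
#if 0
static void exampleSelectorMatching( void )
{
    ASSERT( strMatchesSelector("drag", "lag,drag") == rtsTrue  );
    ASSERT( strMatchesSelector("lag",  "lag,drag") == rtsTrue  );
    ASSERT( strMatchesSelector("la",   "lag,drag") == rtsFalse ); // no prefix matching
}
#endif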

/* -----------------------------------------------------------------------------
 * Figure out whether a closure should be counted in this census, by
 * testing against all the specified constraints.
 * -------------------------------------------------------------------------- */
rtsBool
closureSatisfiesConstraints( StgClosure* p )
{
#ifdef DEBUG_HEAP_PROF
    (void)p;   /* keep gcc -Wall happy */
    return rtsTrue;
#else
    rtsBool b;

    // The CCS has a selected field to indicate whether this closure is
    // deselected by not being mentioned in the module, CC, or CCS
    // selectors.
    if (!p->header.prof.ccs->selected) {
        return rtsFalse;
    }

    if (RtsFlags.ProfFlags.descrSelector) {
        b = strMatchesSelector( (get_itbl((StgClosure *)p))->prof.closure_desc,
                                RtsFlags.ProfFlags.descrSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.typeSelector) {
        b = strMatchesSelector( (get_itbl((StgClosure *)p))->prof.closure_type,
                                RtsFlags.ProfFlags.typeSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.retainerSelector) {
        RetainerSet *rs;
        nat i;
        // We must check that the retainer set is valid here.  One
        // reason it might not be valid is if this closure is a
        // newly deceased weak pointer (i.e. a DEAD_WEAK), since
        // these aren't reached by the retainer profiler's traversal.
        if (isRetainerSetFieldValid((StgClosure *)p)) {
            rs = retainerSetOf((StgClosure *)p);
            if (rs != NULL) {
                for (i = 0; i < rs->num; i++) {
                    b = strMatchesSelector( rs->element[i]->cc->label,
                                            RtsFlags.ProfFlags.retainerSelector );
                    if (b) return rtsTrue;
                }
            }
        }
        return rtsFalse;
    }
    return rtsTrue;
#endif /* DEBUG_HEAP_PROF */
}

/* -----------------------------------------------------------------------------
 * Aggregate the heap census info for biographical profiling
 * -------------------------------------------------------------------------- */
#ifdef PROFILING
static void
aggregateCensusInfo( void )
{
    HashTable *acc;
    nat t;
    counter *c, *d, *ctrs;
    Arena *arena;

    if (!doingLDVProfiling()) return;

    // Aggregate the LDV counters when displaying by biography.
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        int void_total, drag_total;

        // Now we compute void_total and drag_total for each census.
        // After the program has finished, the void_total field of
        // each census contains the count of words that were *created*
        // in this era and were eventually void.  Conversely, if a
        // void closure was destroyed in this era, it will be
        // represented by a negative count of words in void_total.
        //
        // To get the count of live words that are void at each
        // census, just propagate the void_total count forwards:

        void_total = 0;
        drag_total = 0;
        for (t = 1; t < era; t++) { // note: start at 1, not 0
            void_total += censuses[t].void_total;
            drag_total += censuses[t].drag_total;
            censuses[t].void_total = void_total;
            censuses[t].drag_total = drag_total;

            ASSERT( censuses[t].void_total <= censuses[t].not_used );
            // should be true because: void_total is the count of
            // live words that are void at this census, which *must*
            // be less than the number of live words that have not
            // been used yet.

            ASSERT( censuses[t].drag_total <= censuses[t].used );
            // similar reasoning as above.
        }

        return;
    }

    // otherwise... we're doing a heap profile that is restricted to
    // some combination of lag, drag, void or use.  We've kept all the
    // census info for all censuses so far, but we still need to
    // aggregate the counters forwards.

    arena = newArena();
    acc = allocHashTable();
    ctrs = NULL;

    for (t = 1; t < era; t++) {

        // first look through all the counters we're aggregating
        for (c = ctrs; c != NULL; c = c->next) {
            // if one of the totals is non-zero, then this closure
            // type must be present in the heap at this census time...
            d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);

            if (d == NULL) {
                // if this closure identity isn't present in the
                // census for this time period, then our running
                // totals *must* be zero.
                ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);

                // debugCCS(c->identity);
                // debugBelch(" census=%d void_total=%d drag_total=%d\n",
                //            t, c->c.ldv.void_total, c->c.ldv.drag_total);
            } else {
                d->c.ldv.void_total += c->c.ldv.void_total;
                d->c.ldv.drag_total += c->c.ldv.drag_total;
                c->c.ldv.void_total =  d->c.ldv.void_total;
                c->c.ldv.drag_total =  d->c.ldv.drag_total;

                ASSERT( c->c.ldv.void_total >= 0 );
                ASSERT( c->c.ldv.drag_total >= 0 );
            }
        }

        // now look through the counters in this census to find new ones
        for (c = censuses[t].ctrs; c != NULL; c = c->next) {
            d = lookupHashTable(acc, (StgWord)c->identity);
            if (d == NULL) {
                d = arenaAlloc( arena, sizeof(counter) );
                initLDVCtr(d);
                insertHashTable( acc, (StgWord)c->identity, d );
                d->identity = c->identity;
                d->next = ctrs;
                ctrs = d;
                d->c.ldv.void_total = c->c.ldv.void_total;
                d->c.ldv.drag_total = c->c.ldv.drag_total;
            }
            ASSERT( c->c.ldv.void_total >= 0 );
            ASSERT( c->c.ldv.drag_total >= 0 );
        }
    }

    freeHashTable(acc, NULL);
    arenaFree(arena);
}
#endif
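
/* A small worked example of the forward propagation above (numbers
 * invented for illustration): suppose 10 words created in era 1 turn
 * out to be void, 4 of them die in era 2 and the remaining 6 in era 3.
 * The per-era deltas recorded by LDV_recordDead() are then +10, -4, -6,
 * and the running totals give the live-void words at each census:
 * 10, 6, 0.
 */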

/* -----------------------------------------------------------------------------
 * Print out the results of a heap census.
 * -------------------------------------------------------------------------- */
static void
dumpCensus( Census *census )
{
    counter *ctr;
    int count;

    printSample(rtsTrue, census->time);

#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        fprintf(hp_file, "VOID\t%lu\n",
                (unsigned long)(census->void_total) * sizeof(W_));
        fprintf(hp_file, "LAG\t%lu\n",
                (unsigned long)(census->not_used - census->void_total) * sizeof(W_));
        fprintf(hp_file, "USE\t%lu\n",
                (unsigned long)(census->used - census->drag_total) * sizeof(W_));
        fprintf(hp_file, "INHERENT_USE\t%lu\n",
                (unsigned long)(census->prim) * sizeof(W_));
        fprintf(hp_file, "DRAG\t%lu\n",
                (unsigned long)(census->drag_total) * sizeof(W_));
        printSample(rtsFalse, census->time);
        return;
    }
#endif

    for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {

#ifdef PROFILING
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            count = 0;
            if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
            if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.drag_total;
            if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.void_total;
            if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
        } else
#endif
        {
            count = ctr->c.resid;
        }

        ASSERT( count >= 0 );

        if (count == 0) continue;

#ifdef DEBUG_HEAP_PROF
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_INFOPTR:
            fprintf(hp_file, "%s", lookupGHCName(ctr->identity));
            break;
        case HEAP_BY_CLOSURE_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        }
#endif

#ifdef PROFILING
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CCS:
            fprint_ccs(hp_file, (CostCentreStack *)ctr->identity,
                       RtsFlags.ProfFlags.ccsLength);
            break;
        case HEAP_BY_MOD:
        case HEAP_BY_DESCR:
        case HEAP_BY_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        case HEAP_BY_RETAINER:
        {
            RetainerSet *rs = (RetainerSet *)ctr->identity;

            // it might be the distinguished retainer set rs_MANY:
            if (rs == &rs_MANY) {
                fprintf(hp_file, "MANY");
                break;
            }

            // Mark this retainer set by negating its id, because it
            // has appeared in at least one census.  We print the
            // values of all such retainer sets into the log file at
            // the end.  A retainer set may exist but not feature in
            // any censuses if it arose as the intermediate retainer
            // set for some closure during retainer set calculation.
            if (rs->id > 0)
                rs->id = -(rs->id);

            // report in the unit of bytes: * sizeof(StgWord)
            printRetainerSetShort(hp_file, rs);
            break;
        }
        default:
            barf("dumpCensus; doHeapProfile");
        }
#endif

        fprintf(hp_file, "\t%lu\n", (unsigned long)count * sizeof(W_));
    }

    printSample(rtsFalse, census->time);
}
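
/* The LDV bands printed above partition the live words of a census:
 * a closure not yet used is either LAG (will be used later) or VOID
 * (destined to die unused), hence LAG = not_used - void_total; a
 * closure already used is either USE (still wanted) or DRAG (kept
 * alive past its last use), hence USE = used - drag_total.
 * INHERENT_USE is the 'prim' total for closures such as TSOs that
 * count as used by definition.
 */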

/* -----------------------------------------------------------------------------
 * Code to perform a heap census.
 * -------------------------------------------------------------------------- */
static void
heapCensusChain( Census *census, bdescr *bd )
{
    StgPtr p;
    StgInfoTable *info;
    void *identity;
    nat size;
    counter *ctr;
    nat real_size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        // HACK: ignore pinned blocks, because they contain gaps.
        // It's not clear exactly what we'd like to do here, since we
        // can't tell which objects in the block are actually alive.
        // Perhaps the whole block should be counted as SYSTEM memory.
        if (bd->flags & BF_PINNED) {
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case IND_PERM:
            case IND_OLDGEN:
            case IND_OLDGEN_PERM:
            case CAF_BLACKHOLE:
            case SE_CAF_BLACKHOLE:
            case SE_BLACKHOLE:
            case BLACKHOLE:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR:
            case WEAK:
            case STABLE_NAME:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
                size = ap_stack_sizeW((StgAP_STACK *)p);
                break;

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW(stgCast(StgArrWords*,p));
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
#ifdef DEBUG_HEAP_PROF
                size = tso_sizeW((StgTSO *)p);
                break;
#else
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = tso_sizeW((StgTSO *)p);
                    break;
                } else {
                    // Skip this TSO and move on to the next object
                    p += tso_sizeW((StgTSO *)p);
                    continue;
                }
#endif

            case TREC_HEADER:
                prim = rtsTrue;
                size = sizeofW(StgTRecHeader);
                break;

            case TVAR_WATCH_QUEUE:
                prim = rtsTrue;
                size = sizeofW(StgTVarWatchQueue);
                break;

            case INVARIANT_CHECK_QUEUE:
                prim = rtsTrue;
                size = sizeofW(StgInvariantCheckQueue);
                break;

            case ATOMIC_INVARIANT:
                prim = rtsTrue;
                size = sizeofW(StgAtomicInvariant);
                break;

            case TVAR:
                prim = rtsTrue;
                size = sizeofW(StgTVar);
                break;

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("heapCensus, unknown object: %d", info->type);
            }

            identity = NULL;

#ifdef DEBUG_HEAP_PROF
            real_size = size;
#else
            // subtract the profiling overhead
            real_size = size - sizeofW(StgProfHeader);
#endif

            if (closureSatisfiesConstraints((StgClosure*)p)) {
#ifdef PROFILING
                if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
                    if (prim)
                        census->prim += real_size;
                    else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                        census->not_used += real_size;
                    else
                        census->used += real_size;
                } else
#endif
                {
                    identity = closureIdentity((StgClosure *)p);

                    if (identity != NULL) {
                        ctr = lookupHashTable( census->hash, (StgWord)identity );
                        if (ctr != NULL) {
#ifdef PROFILING
                            if (RtsFlags.ProfFlags.bioSelector != NULL) {
                                if (prim)
                                    ctr->c.ldv.prim += real_size;
                                else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                                    ctr->c.ldv.not_used += real_size;
                                else
                                    ctr->c.ldv.used += real_size;
                            } else
#endif
                            {
                                ctr->c.resid += real_size;
                            }
                        } else {
                            ctr = arenaAlloc( census->arena, sizeof(counter) );
                            initLDVCtr(ctr);
                            insertHashTable( census->hash, (StgWord)identity, ctr );
                            ctr->identity = identity;
                            ctr->next = census->ctrs;
                            census->ctrs = ctr;

#ifdef PROFILING
                            if (RtsFlags.ProfFlags.bioSelector != NULL) {
                                if (prim)
                                    ctr->c.ldv.prim = real_size;
                                else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                                    ctr->c.ldv.not_used = real_size;
                                else
                                    ctr->c.ldv.used = real_size;
                            } else
#endif
                            {
                                ctr->c.resid = real_size;
                            }
                        }
                    }
                }
            }

            p += size;
        }
    }
}
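
/* Note: the walk above relies on each case computing the exact size in
 * words of the closure at p, because 'p += size' is the only way to
 * reach the next object in the block; an unhandled closure type would
 * desynchronise the walk, hence the barf() in the default case.
 */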
void
heapCensus( void )
{
    nat g, s;
    Census *census;

    census = &censuses[era];
    census->time = mut_user_time();

    // calculate retainer sets if necessary
#ifdef PROFILING
    if (doingRetainerProfiling()) {
        retainerProfile();
    }
#endif

#ifdef PROFILING
    stat_startHeapCensus();
#endif

    // Traverse the heap, collecting the census info

    // First the small_alloc_list: we have to fix the free pointer at
    // the end by calling tidyAllocateLists() first.
    tidyAllocateLists();
    heapCensusChain( census, small_alloc_list );

    // Now traverse the heap in each generation/step.
    if (RtsFlags.GcFlags.generations == 1) {
        heapCensusChain( census, g0s0->blocks );
    } else {
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            for (s = 0; s < generations[g].n_steps; s++) {
                heapCensusChain( census, generations[g].steps[s].blocks );
                // Are we interested in large objects?  might be
                // confusing to include the stack in a heap profile.
                heapCensusChain( census, generations[g].steps[s].large_objects );
            }
        }
    }

    // dump out the census info
#ifdef PROFILING
    // We can't generate any info for LDV profiling until
    // the end of the run...
    if (!doingLDVProfiling())
        dumpCensus( census );
#else
    dumpCensus( census );
#endif


    // free our storage, unless we're keeping all the census info for
    // future restriction by biography.
#ifdef PROFILING
    if (RtsFlags.ProfFlags.bioSelector == NULL)
#endif
    {
        freeHashTable( census->hash, NULL/* don't free the elements */ );
        arenaFree( census->arena );
        census->hash = NULL;
        census->arena = NULL;
    }

    // we're into the next time period now
    nextEra();

#ifdef PROFILING
    stat_endHeapCensus();
#endif
}

#endif /* PROFILING || DEBUG_HEAP_PROF */