rts/ProfHeap.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2003
4 *
5 * Support for heap profiling
6 *
7 * ---------------------------------------------------------------------------*/
8
9 #if defined(DEBUG) && !defined(PROFILING)
10 #define DEBUG_HEAP_PROF
11 #else
12 #undef DEBUG_HEAP_PROF
13 #endif
14
15 #if defined(PROFILING) || defined(DEBUG_HEAP_PROF)
16
17 #include "PosixSource.h"
18 #include "Rts.h"
19 #include "RtsUtils.h"
20 #include "RtsFlags.h"
21 #include "Profiling.h"
22 #include "ProfHeap.h"
23 #include "Stats.h"
24 #include "Hash.h"
25 #include "RetainerProfile.h"
26 #include "LdvProfile.h"
27 #include "Arena.h"
28 #include "Printer.h"
29
30 #include <string.h>
31 #include <stdlib.h>
32 #include <math.h>
33
34 /* -----------------------------------------------------------------------------
35 * era stores the current time period. It is the same as the
36 * number of censuses that have been performed.
37 *
38 * RESTRICTION:
39 * era must fit in LDV_SHIFT (15 or 30) bits.
40 * Invariants:
41 * era is initialized to 1 in initHeapProfiling().
42 *
43 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
44 * When era reaches max_era, the profiling stops because a closure can
45 * store only up to (max_era - 1) as its creation or last use time.
46 * -------------------------------------------------------------------------- */
47 unsigned int era;
48 static nat max_era;
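/* Editorial sketch (not part of the original source): initHeapProfiling()
 * below computes max_era with a doubling loop; an equivalent one-liner,
 * assuming LDV_SHIFT is smaller than the word width, would be
 *
 *     max_era = 1 << LDV_SHIFT;            // e.g. 1 << 15 == 32768
 *
 * so at most max_era - 1 censuses (32767 with a 15-bit LDV_SHIFT) can be
 * taken before profiling must stop.
 */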
49
50 /* -----------------------------------------------------------------------------
51 * Counters
52 *
53 * For most heap profiles each closure identity gets a simple count
54 * of live words in the heap at each census. However, if we're
55 * selecting by biography, then we have to keep the various
56 * lag/drag/void counters for each identity.
57 * -------------------------------------------------------------------------- */
58 typedef struct _counter {
59 void *identity;
60 union {
61 nat resid;
62 struct {
63 int prim; // total size of 'inherently used' closures
64 int not_used; // total size of 'never used' closures
65 int used; // total size of 'used at least once' closures
66 int void_total; // current total size of 'destroyed without being used' closures
67 int drag_total; // current total size of 'used at least once and waiting to die'
68 } ldv;
69 } c;
70 struct _counter *next;
71 } counter;
72
73 STATIC_INLINE void
74 initLDVCtr( counter *ctr )
75 {
76 ctr->c.ldv.prim = 0;
77 ctr->c.ldv.not_used = 0;
78 ctr->c.ldv.used = 0;
79 ctr->c.ldv.void_total = 0;
80 ctr->c.ldv.drag_total = 0;
81 }
82
83 typedef struct {
84 double time; // the time in MUT time when the census is made
85 HashTable * hash;
86 counter * ctrs;
87 Arena * arena;
88
89 // for LDV profiling, when just displaying by LDV
90 int prim;
91 int not_used;
92 int used;
93 int void_total;
94 int drag_total;
95 } Census;
96
97 static Census *censuses = NULL;
98 static nat n_censuses = 0;
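/* Editorial sketch (not in the original file): the census tables above are
 * used with a recurring lookup-or-create pattern, relying on the Hash.h and
 * Arena.h APIs already included. A hypothetical helper capturing it:
 *
 *     static counter *
 *     findOrCreateCtr( Census *census, void *identity )
 *     {
 *         counter *ctr = lookupHashTable(census->hash, (StgWord)identity);
 *         if (ctr == NULL) {
 *             ctr = arenaAlloc(census->arena, sizeof(counter));
 *             initLDVCtr(ctr);
 *             insertHashTable(census->hash, (StgWord)identity, ctr);
 *             ctr->identity = identity;
 *             ctr->next = census->ctrs;
 *             census->ctrs = ctr;
 *         }
 *         return ctr;
 *     }
 *
 * LDV_recordDead() and heapCensusChain() below inline this pattern.
 */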
99
100 #ifdef PROFILING
101 static void aggregateCensusInfo( void );
102 #endif
103
104 static void dumpCensus( Census *census );
105
106 /* -----------------------------------------------------------------------------
107 Closure Type Profiling;
108
109 PROBABLY TOTALLY OUT OF DATE -- ToDo (SDM)
110 -------------------------------------------------------------------------- */
111
112 #ifdef DEBUG_HEAP_PROF
113 static char *type_names[] = {
114 "INVALID_OBJECT"
115 , "CONSTR"
116 , "CONSTR_STATIC"
117 , "CONSTR_NOCAF_STATIC"
118
119 , "FUN"
120 , "FUN_STATIC"
121
122 , "THUNK"
123 , "THUNK_STATIC"
124 , "THUNK_SELECTOR"
125
126 , "BCO"
127 , "AP_STACK"
128 , "AP"
129
130 , "PAP"
131
132 , "IND"
133 , "IND_OLDGEN"
134 , "IND_PERM"
135 , "IND_OLDGEN_PERM"
136 , "IND_STATIC"
137
138 , "RET_BCO"
139 , "RET_SMALL"
140 , "RET_BIG"
141 , "RET_DYN"
142 , "UPDATE_FRAME"
143 , "CATCH_FRAME"
144 , "STOP_FRAME"
145
146 , "BLACKHOLE"
147 , "MVAR"
148
149 , "ARR_WORDS"
150
151 , "MUT_ARR_PTRS_CLEAN"
152 , "MUT_ARR_PTRS_DIRTY"
153 , "MUT_ARR_PTRS_FROZEN"
154 , "MUT_VAR_CLEAN"
155 , "MUT_VAR_DIRTY"
156
157 , "WEAK"
158
159 , "TSO"
160
161 , "BLOCKED_FETCH"
162 , "FETCH_ME"
163
164 , "EVACUATED"
165 };
166
167 #endif /* DEBUG_HEAP_PROF */
168
169 /* -----------------------------------------------------------------------------
170 * Find the "closure identity", which is a unique pointer representing
171 * the band to which this closure's heap space is attributed in the
172 * heap profile.
173 * ------------------------------------------------------------------------- */
174 STATIC_INLINE void *
175 closureIdentity( StgClosure *p )
176 {
177 switch (RtsFlags.ProfFlags.doHeapProfile) {
178
179 #ifdef PROFILING
180 case HEAP_BY_CCS:
181 return p->header.prof.ccs;
182 case HEAP_BY_MOD:
183 return p->header.prof.ccs->cc->module;
184 case HEAP_BY_DESCR:
185 return get_itbl(p)->prof.closure_desc;
186 case HEAP_BY_TYPE:
187 return get_itbl(p)->prof.closure_type;
188 case HEAP_BY_RETAINER:
189 // AFAIK, the only closures in the heap which might not have a
190 // valid retainer set are DEAD_WEAK closures.
191 if (isRetainerSetFieldValid(p))
192 return retainerSetOf(p);
193 else
194 return NULL;
195
196 #else // DEBUG
197 case HEAP_BY_INFOPTR:
198 return (void *)((StgClosure *)p)->header.info;
199 case HEAP_BY_CLOSURE_TYPE:
200 return type_names[get_itbl(p)->type];
201
202 #endif
203 default:
204 barf("closureIdentity");
205 }
206 }
207
208 /* --------------------------------------------------------------------------
209 * Profiling type predicates
210 * ----------------------------------------------------------------------- */
211 #ifdef PROFILING
212 STATIC_INLINE rtsBool
213 doingLDVProfiling( void )
214 {
215 return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
216 || RtsFlags.ProfFlags.bioSelector != NULL);
217 }
218
219 STATIC_INLINE rtsBool
220 doingRetainerProfiling( void )
221 {
222 return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
223 || RtsFlags.ProfFlags.retainerSelector != NULL);
224 }
225 #endif /* PROFILING */
226
227 // Processes a closure 'c' being destroyed whose size is 'size'.
228 // Make sure that LDV_recordDead() is not invoked on 'inherently used' closures
229 // such as TSO; they should not be involved in computing dragNew or voidNew.
230 //
231 // Even though era is checked in both LdvCensusForDead() and
232 // LdvCensusKillAll(), we still need to make sure that era is > 0 because
233 // LDV_recordDead() may be called from elsewhere in the runtime system. E.g.,
234 // when a thunk is replaced by an indirection object.
235
236 #ifdef PROFILING
237 void
238 LDV_recordDead( StgClosure *c, nat size )
239 {
240 void *id;
241 nat t;
242 counter *ctr;
243
244 if (era > 0 && closureSatisfiesConstraints(c)) {
245 size -= sizeofW(StgProfHeader);
246 ASSERT(LDVW(c) != 0);
247 if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
248 t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
249 if (t < era) {
250 if (RtsFlags.ProfFlags.bioSelector == NULL) {
251 censuses[t].void_total += (int)size;
252 censuses[era].void_total -= (int)size;
253 ASSERT(censuses[t].void_total < censuses[t].not_used);
254 } else {
255 id = closureIdentity(c);
256 ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
257 ASSERT( ctr != NULL );
258 ctr->c.ldv.void_total += (int)size;
259 ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
260 if (ctr == NULL) {
261 ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
262 initLDVCtr(ctr);
263 insertHashTable(censuses[era].hash, (StgWord)id, ctr);
264 ctr->identity = id;
265 ctr->next = censuses[era].ctrs;
266 censuses[era].ctrs = ctr;
267 }
268 ctr->c.ldv.void_total -= (int)size;
269 }
270 }
271 } else {
272 t = LDVW((c)) & LDV_LAST_MASK;
273 if (t + 1 < era) {
274 if (RtsFlags.ProfFlags.bioSelector == NULL) {
275 censuses[t+1].drag_total += size;
276 censuses[era].drag_total -= size;
277 } else {
278 void *id;
279 id = closureIdentity(c);
280 ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
281 ASSERT( ctr != NULL );
282 ctr->c.ldv.drag_total += (int)size;
283 ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
284 if (ctr == NULL) {
285 ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
286 initLDVCtr(ctr);
287 insertHashTable(censuses[era].hash, (StgWord)id, ctr);
288 ctr->identity = id;
289 ctr->next = censuses[era].ctrs;
290 censuses[era].ctrs = ctr;
291 }
292 ctr->c.ldv.drag_total -= (int)size;
293 }
294 }
295 }
296 }
297 }
298 #endif
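/* Editorial walk-through (not from the original source): suppose a closure
 * with real size S words is created during census 3 and destroyed, never
 * having been used, while era == 7. With no bioSelector the code above does,
 * in effect:
 *
 *     censuses[3].void_total += S;   // created here and eventually void
 *     censuses[7].void_total -= S;   // destroyed here
 *
 * aggregateCensusInfo() later prefix-sums void_total across the censuses,
 * so censuses[3..6] each report S live void words while censuses[7] onwards
 * report none. drag_total is handled analogously, starting from the census
 * after the closure's last use.
 */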
299
300 /* --------------------------------------------------------------------------
301 * Initialize censuses[era].
302 * ----------------------------------------------------------------------- */
303 STATIC_INLINE void
304 initEra(Census *census)
305 {
306 census->hash = allocHashTable();
307 census->ctrs = NULL;
308 census->arena = newArena();
309
310 census->not_used = 0;
311 census->used = 0;
312 census->prim = 0;
313 census->void_total = 0;
314 census->drag_total = 0;
315 }
316
317 STATIC_INLINE void
318 freeEra(Census *census)
319 {
320 arenaFree(census->arena);
321 freeHashTable(census->hash, NULL);
322 }
323
324 /* --------------------------------------------------------------------------
325 * Increases era by 1 and initializes censuses[era].
326 * Reallocates censuses[] and doubles its size if needed.
327 * ----------------------------------------------------------------------- */
328 static void
329 nextEra( void )
330 {
331 #ifdef PROFILING
332 if (doingLDVProfiling()) {
333 era++;
334
335 if (era == max_era) {
336 errorBelch("maximum number of censuses reached; use +RTS -i to reduce");
337 stg_exit(EXIT_FAILURE);
338 }
339
340 if (era == n_censuses) {
341 n_censuses *= 2;
342 censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
343 "nextEra");
344 }
345 }
346 #endif /* PROFILING */
347
348 initEra( &censuses[era] );
349 }
350
351 /* -----------------------------------------------------------------------------
352 * DEBUG heap profiling, by info table
353 * -------------------------------------------------------------------------- */
354
355 #ifdef DEBUG_HEAP_PROF
356 FILE *hp_file;
357 static char *hp_filename;
358
359 void initProfiling1( void )
360 {
361 }
362
363 void freeProfiling1( void )
364 {
365 }
366
367 void initProfiling2( void )
368 {
369 if (RtsFlags.ProfFlags.doHeapProfile) {
370 /* Initialise the log file name */
371 hp_filename = stgMallocBytes(strlen(prog_name) + 6, "hpFileName");
372 sprintf(hp_filename, "%s.hp", prog_name);
373
374 /* open the log file */
375 if ((hp_file = fopen(hp_filename, "w")) == NULL) {
376 debugBelch("Can't open profiling report file %s\n",
377 hp_filename);
378 RtsFlags.ProfFlags.doHeapProfile = 0;
379 return;
380 }
381 }
382
383 initHeapProfiling();
384 }
385
386 void endProfiling( void )
387 {
388 endHeapProfiling();
389 }
390 #endif /* DEBUG_HEAP_PROF */
391
392 static void
393 printSample(rtsBool beginSample, StgDouble sampleValue)
394 {
395 StgDouble fractionalPart, integralPart;
396 fractionalPart = modf(sampleValue, &integralPart);
397 fprintf(hp_file, "%s %" FMT_Word64 ".%02" FMT_Word64 "\n",
398 (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
399 (StgWord64)integralPart, (StgWord64)(fractionalPart * 100));
400 }
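/* Editorial note: together with the header written by initHeapProfiling(),
 * printSample() and dumpCensus() produce the usual .hp format, roughly
 * (identities and sizes invented for illustration):
 *
 *     JOB "prog +RTS -hc"
 *     DATE "..."
 *     SAMPLE_UNIT "seconds"
 *     VALUE_UNIT "bytes"
 *     BEGIN_SAMPLE 0.57
 *     (184)go/map/Main.CAF	8464
 *     END_SAMPLE 0.57
 */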
401
402 /* --------------------------------------------------------------------------
403 * Initialize the heap profiler
404 * ----------------------------------------------------------------------- */
405 nat
406 initHeapProfiling(void)
407 {
408 if (! RtsFlags.ProfFlags.doHeapProfile) {
409 return 0;
410 }
411
412 #ifdef PROFILING
413 if (doingLDVProfiling() && doingRetainerProfiling()) {
414 errorBelch("cannot mix -hb and -hr");
415 stg_exit(EXIT_FAILURE);
416 }
417 #endif
418
419 // we only count eras if we're doing LDV profiling. Otherwise era
420 // is fixed at zero.
421 #ifdef PROFILING
422 if (doingLDVProfiling()) {
423 era = 1;
424 } else
425 #endif
426 {
427 era = 0;
428 }
429
430 { // max_era = 2^LDV_SHIFT
431 nat p;
432 max_era = 1;
433 for (p = 0; p < LDV_SHIFT; p++)
434 max_era *= 2;
435 }
436
437 n_censuses = 32;
438 censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");
439
440 initEra( &censuses[era] );
441
442 /* initProfilingLogFile(); */
443 fprintf(hp_file, "JOB \"%s", prog_name);
444
445 #ifdef PROFILING
446 {
447 int count;
448 for(count = 1; count < prog_argc; count++)
449 fprintf(hp_file, " %s", prog_argv[count]);
450 fprintf(hp_file, " +RTS");
451 for(count = 0; count < rts_argc; count++)
452 fprintf(hp_file, " %s", rts_argv[count]);
453 }
454 #endif /* PROFILING */
455
456 fprintf(hp_file, "\"\n" );
457
458 fprintf(hp_file, "DATE \"%s\"\n", time_str());
459
460 fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
461 fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");
462
463 printSample(rtsTrue, 0);
464 printSample(rtsFalse, 0);
465
466 #ifdef DEBUG_HEAP_PROF
467 DEBUG_LoadSymbols(prog_name);
468 #endif
469
470 #ifdef PROFILING
471 if (doingRetainerProfiling()) {
472 initRetainerProfiling();
473 }
474 #endif
475
476 return 0;
477 }
478
479 void
480 endHeapProfiling(void)
481 {
482 StgDouble seconds;
483
484 if (! RtsFlags.ProfFlags.doHeapProfile) {
485 return;
486 }
487
488 #ifdef PROFILING
489 if (doingRetainerProfiling()) {
490 endRetainerProfiling();
491 }
492 #endif
493
494 #ifdef PROFILING
495 if (doingLDVProfiling()) {
496 nat t;
497 LdvCensusKillAll();
498 aggregateCensusInfo();
499 for (t = 1; t < era; t++) {
500 dumpCensus( &censuses[t] );
501 }
502 }
503 #endif
504
505 {
506 nat t;
507 for (t = 0; t <= era; t++) {
508 freeEra( &censuses[t] );
509 }
510 }
511 stgFree(censuses);
512
513 seconds = mut_user_time();
514 printSample(rtsTrue, seconds);
515 printSample(rtsFalse, seconds);
516 fclose(hp_file);
517 }
518
519
520
521 #ifdef PROFILING
522 static size_t
523 buf_append(char *p, const char *q, char *end)
524 {
525 int m;
526
527 for (m = 0; p < end; p++, q++, m++) {
528 *p = *q;
529 if (*q == '\0') { break; }
530 }
531 return m;
532 }
533
534 static void
535 fprint_ccs(FILE *fp, CostCentreStack *ccs, nat max_length)
536 {
537 char buf[max_length+1], *p, *buf_end;
538
539 // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
540 if (ccs == CCS_MAIN) {
541 fprintf(fp, "MAIN");
542 return;
543 }
544
545 fprintf(fp, "(%ld)", ccs->ccsID);
546
547 p = buf;
548 buf_end = buf + max_length + 1;
549
550 // keep printing components of the stack until we run out of space
551 // in the buffer. If we run out of space, end with "...".
552 for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {
553
554 // CAF cost centres print as M.CAF, but we leave the module
555 // name out of all the others to save space.
556 if (!strcmp(ccs->cc->label,"CAF")) {
557 p += buf_append(p, ccs->cc->module, buf_end);
558 p += buf_append(p, ".CAF", buf_end);
559 } else {
560 p += buf_append(p, ccs->cc->label, buf_end);
561 if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
562 p += buf_append(p, "/", buf_end);
563 }
564 }
565
566 if (p >= buf_end) {
567 sprintf(buf+max_length-4, "...");
568 break;
569 }
570 }
571 fprintf(fp, "%s", buf);
572 }
573 #endif /* PROFILING */
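/* Editorial example (invented names): for a stack <go, map, CAF in module
 * Main, MAIN> whose ccsID is 42, fprint_ccs() writes
 *
 *     (42)go/map/Main.CAF
 *
 * and falls back to a trailing "..." when the rendered stack would exceed
 * max_length characters.
 */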
574
575 rtsBool
576 strMatchesSelector( char* str, char* sel )
577 {
578 char* p;
579 // debugBelch("str_matches_selector %s %s\n", str, sel);
580 while (1) {
581 // Compare str against wherever we've got to in sel.
582 p = str;
583 while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
584 p++; sel++;
585 }
586 // Match if all of str was used and we have reached the end of a sel fragment.
587 if (*p == '\0' && (*sel == ',' || *sel == '\0'))
588 return rtsTrue;
589
590 // No match. Advance sel to the start of the next elem.
591 while (*sel != ',' && *sel != '\0') sel++;
592 if (*sel == ',') sel++;
593
594 /* Run out of sel ?? */
595 if (*sel == '\0') return rtsFalse;
596 }
597 }
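/* Editorial example: strMatchesSelector("CAF", "Main,CAF") is rtsTrue
 * because "CAF" exactly matches the second comma-separated element, while
 * strMatchesSelector("CA", "Main,CAF") is rtsFalse because no element is
 * matched in full.
 */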
598
599 /* -----------------------------------------------------------------------------
600 * Figure out whether a closure should be counted in this census, by
601 * testing against all the specified constraints.
602 * -------------------------------------------------------------------------- */
603 rtsBool
604 closureSatisfiesConstraints( StgClosure* p )
605 {
606 #ifdef DEBUG_HEAP_PROF
607 (void)p; /* keep gcc -Wall happy */
608 return rtsTrue;
609 #else
610 rtsBool b;
611
612 // The CCS has a selected field to indicate whether this closure is
613 // deselected by not being mentioned in the module, CC, or CCS
614 // selectors.
615 if (!p->header.prof.ccs->selected) {
616 return rtsFalse;
617 }
618
619 if (RtsFlags.ProfFlags.descrSelector) {
620 b = strMatchesSelector( (get_itbl((StgClosure *)p))->prof.closure_desc,
621 RtsFlags.ProfFlags.descrSelector );
622 if (!b) return rtsFalse;
623 }
624 if (RtsFlags.ProfFlags.typeSelector) {
625 b = strMatchesSelector( (get_itbl((StgClosure *)p))->prof.closure_type,
626 RtsFlags.ProfFlags.typeSelector );
627 if (!b) return rtsFalse;
628 }
629 if (RtsFlags.ProfFlags.retainerSelector) {
630 RetainerSet *rs;
631 nat i;
632 // We must check that the retainer set is valid here. One
633 // reason it might not be valid is if this closure is a
634 // newly deceased weak pointer (i.e. a DEAD_WEAK), since
635 // these aren't reached by the retainer profiler's traversal.
636 if (isRetainerSetFieldValid((StgClosure *)p)) {
637 rs = retainerSetOf((StgClosure *)p);
638 if (rs != NULL) {
639 for (i = 0; i < rs->num; i++) {
640 b = strMatchesSelector( rs->element[i]->cc->label,
641 RtsFlags.ProfFlags.retainerSelector );
642 if (b) return rtsTrue;
643 }
644 }
645 }
646 return rtsFalse;
647 }
648 return rtsTrue;
649 #endif /* DEBUG_HEAP_PROF */
650 }
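/* Editorial note (assuming the standard GHC heap-profiling flags): the
 * selectors tested above come from RTS options, e.g.
 *
 *     ./prog +RTS -hc -hyInt,Double -RTS
 *
 * requests a cost-centre-stack profile restricted to closures whose
 * closure_type string matches "Int" or "Double".
 */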
651
652 /* -----------------------------------------------------------------------------
653 * Aggregate the heap census info for biographical profiling
654 * -------------------------------------------------------------------------- */
655 #ifdef PROFILING
656 static void
657 aggregateCensusInfo( void )
658 {
659 HashTable *acc;
660 nat t;
661 counter *c, *d, *ctrs;
662 Arena *arena;
663
664 if (!doingLDVProfiling()) return;
665
666 // Aggregate the LDV counters when displaying by biography.
667 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
668 int void_total, drag_total;
669
670 // Now we compute void_total and drag_total for each census
671 // After the program has finished, the void_total field of
672 // each census contains the count of words that were *created*
673 // in this era and were eventually void. Conversely, if a
674 // void closure was destroyed in this era, it will be
675 // represented by a negative count of words in void_total.
676 //
677 // To get the count of live words that are void at each
678 // census, just propagate the void_total count forwards:
679
680 void_total = 0;
681 drag_total = 0;
682 for (t = 1; t < era; t++) { // note: start at 1, not 0
683 void_total += censuses[t].void_total;
684 drag_total += censuses[t].drag_total;
685 censuses[t].void_total = void_total;
686 censuses[t].drag_total = drag_total;
687
688 ASSERT( censuses[t].void_total <= censuses[t].not_used );
689 // should be true because: void_total is the count of
690 // live words that are void at this census, which *must*
691 // be less than the number of live words that have not
692 // been used yet.
693
694 ASSERT( censuses[t].drag_total <= censuses[t].used );
695 // similar reasoning as above.
696 }
697
698 return;
699 }
700
701 // otherwise... we're doing a heap profile that is restricted to
702 // some combination of lag, drag, void or use. We've kept all the
703 // census info for all censuses so far, but we still need to
704 // aggregate the counters forwards.
705
706 arena = newArena();
707 acc = allocHashTable();
708 ctrs = NULL;
709
710 for (t = 1; t < era; t++) {
711
712 // first look through all the counters we're aggregating
713 for (c = ctrs; c != NULL; c = c->next) {
714 // if one of the totals is non-zero, then this closure
715 // type must be present in the heap at this census time...
716 d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);
717
718 if (d == NULL) {
719 // if this closure identity isn't present in the
720 // census for this time period, then our running
721 // totals *must* be zero.
722 ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);
723
724 // debugCCS(c->identity);
725 // debugBelch(" census=%d void_total=%d drag_total=%d\n",
726 // t, c->c.ldv.void_total, c->c.ldv.drag_total);
727 } else {
728 d->c.ldv.void_total += c->c.ldv.void_total;
729 d->c.ldv.drag_total += c->c.ldv.drag_total;
730 c->c.ldv.void_total = d->c.ldv.void_total;
731 c->c.ldv.drag_total = d->c.ldv.drag_total;
732
733 ASSERT( c->c.ldv.void_total >= 0 );
734 ASSERT( c->c.ldv.drag_total >= 0 );
735 }
736 }
737
738 // now look through the counters in this census to find new ones
739 for (c = censuses[t].ctrs; c != NULL; c = c->next) {
740 d = lookupHashTable(acc, (StgWord)c->identity);
741 if (d == NULL) {
742 d = arenaAlloc( arena, sizeof(counter) );
743 initLDVCtr(d);
744 insertHashTable( acc, (StgWord)c->identity, d );
745 d->identity = c->identity;
746 d->next = ctrs;
747 ctrs = d;
748 d->c.ldv.void_total = c->c.ldv.void_total;
749 d->c.ldv.drag_total = c->c.ldv.drag_total;
750 }
751 ASSERT( c->c.ldv.void_total >= 0 );
752 ASSERT( c->c.ldv.drag_total >= 0 );
753 }
754 }
755
756 freeHashTable(acc, NULL);
757 arenaFree(arena);
758 }
759 #endif
760
761 /* -----------------------------------------------------------------------------
762 * Print out the results of a heap census.
763 * -------------------------------------------------------------------------- */
764 static void
765 dumpCensus( Census *census )
766 {
767 counter *ctr;
768 int count;
769
770 printSample(rtsTrue, census->time);
771
772 #ifdef PROFILING
773 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
774 fprintf(hp_file, "VOID\t%lu\n", (unsigned long)(census->void_total) * sizeof(W_));
775 fprintf(hp_file, "LAG\t%lu\n",
776 (unsigned long)(census->not_used - census->void_total) * sizeof(W_));
777 fprintf(hp_file, "USE\t%lu\n",
778 (unsigned long)(census->used - census->drag_total) * sizeof(W_));
779 fprintf(hp_file, "INHERENT_USE\t%lu\n",
780 (unsigned long)(census->prim) * sizeof(W_));
781 fprintf(hp_file, "DRAG\t%lu\n",
782 (unsigned long)(census->drag_total) * sizeof(W_));
783 printSample(rtsFalse, census->time);
784 return;
785 }
786 #endif
787
788 for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {
789
790 #ifdef PROFILING
791 if (RtsFlags.ProfFlags.bioSelector != NULL) {
792 count = 0;
793 if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
794 count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
795 if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
796 count += ctr->c.ldv.drag_total;
797 if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
798 count += ctr->c.ldv.void_total;
799 if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
800 count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
801 } else
802 #endif
803 {
804 count = ctr->c.resid;
805 }
806
807 ASSERT( count >= 0 );
808
809 if (count == 0) continue;
810
811 #ifdef DEBUG_HEAP_PROF
812 switch (RtsFlags.ProfFlags.doHeapProfile) {
813 case HEAP_BY_INFOPTR:
814 fprintf(hp_file, "%s", lookupGHCName(ctr->identity));
815 break;
816 case HEAP_BY_CLOSURE_TYPE:
817 fprintf(hp_file, "%s", (char *)ctr->identity);
818 break;
819 }
820 #endif
821
822 #ifdef PROFILING
823 switch (RtsFlags.ProfFlags.doHeapProfile) {
824 case HEAP_BY_CCS:
825 fprint_ccs(hp_file, (CostCentreStack *)ctr->identity, RtsFlags.ProfFlags.ccsLength);
826 break;
827 case HEAP_BY_MOD:
828 case HEAP_BY_DESCR:
829 case HEAP_BY_TYPE:
830 fprintf(hp_file, "%s", (char *)ctr->identity);
831 break;
832 case HEAP_BY_RETAINER:
833 {
834 RetainerSet *rs = (RetainerSet *)ctr->identity;
835
836 // it might be the distinguished retainer set rs_MANY:
837 if (rs == &rs_MANY) {
838 fprintf(hp_file, "MANY");
839 break;
840 }
841
842 // Mark this retainer set by negating its id, because it
843 // has appeared in at least one census. We print the
844 // values of all such retainer sets into the log file at
845 // the end. A retainer set may exist but not feature in
846 // any censuses if it arose as the intermediate retainer
847 // set for some closure during retainer set calculation.
848 if (rs->id > 0)
849 rs->id = -(rs->id);
850
851 // report in the unit of bytes: * sizeof(StgWord)
852 printRetainerSetShort(hp_file, rs);
853 break;
854 }
855 default:
856 barf("dumpCensus; doHeapProfile");
857 }
858 #endif
859
860 fprintf(hp_file, "\t%lu\n", (unsigned long)count * sizeof(W_));
861 }
862
863 printSample(rtsFalse, census->time);
864 }
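/* Editorial note on the -hb bands printed above: LAG and USE are obtained
 * by subtraction because, per census,
 *
 *     not_used == VOID + LAG        (created, never used so far)
 *     used     == USE  + DRAG       (used at least once)
 *
 * with VOID == void_total, DRAG == drag_total and INHERENT_USE == prim.
 */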
865
866 /* -----------------------------------------------------------------------------
867 * Code to perform a heap census.
868 * -------------------------------------------------------------------------- */
869 static void
870 heapCensusChain( Census *census, bdescr *bd )
871 {
872 StgPtr p;
873 StgInfoTable *info;
874 void *identity;
875 nat size;
876 counter *ctr;
877 nat real_size;
878 rtsBool prim;
879
880 for (; bd != NULL; bd = bd->link) {
881
882 // HACK: ignore pinned blocks, because they contain gaps.
883 // It's not clear exactly what we'd like to do here, since we
884 // can't tell which objects in the block are actually alive.
885 // Perhaps the whole block should be counted as SYSTEM memory.
886 if (bd->flags & BF_PINNED) {
887 continue;
888 }
889
890 p = bd->start;
891 while (p < bd->free) {
892 info = get_itbl((StgClosure *)p);
893 prim = rtsFalse;
894
895 switch (info->type) {
896
897 case THUNK:
898 size = thunk_sizeW_fromITBL(info);
899 break;
900
901 case THUNK_1_1:
902 case THUNK_0_2:
903 case THUNK_2_0:
904 size = sizeofW(StgThunkHeader) + 2;
905 break;
906
907 case THUNK_1_0:
908 case THUNK_0_1:
909 case THUNK_SELECTOR:
910 size = sizeofW(StgThunkHeader) + 1;
911 break;
912
913 case CONSTR:
914 case FUN:
915 case IND_PERM:
916 case IND_OLDGEN:
917 case IND_OLDGEN_PERM:
918 case CAF_BLACKHOLE:
919 case SE_CAF_BLACKHOLE:
920 case SE_BLACKHOLE:
921 case BLACKHOLE:
922 case FUN_1_0:
923 case FUN_0_1:
924 case FUN_1_1:
925 case FUN_0_2:
926 case FUN_2_0:
927 case CONSTR_1_0:
928 case CONSTR_0_1:
929 case CONSTR_1_1:
930 case CONSTR_0_2:
931 case CONSTR_2_0:
932 size = sizeW_fromITBL(info);
933 break;
934
935 case IND:
936 // Special case/Delicate Hack: INDs don't normally
937 // appear, since we're doing this heap census right
938 // after GC. However, GarbageCollect() also does
939 // resurrectThreads(), which can update some
940 // blackholes when it calls raiseAsync() on the
941 // resurrected threads. So we know that any IND will
942 // be the size of a BLACKHOLE.
943 size = BLACKHOLE_sizeW();
944 break;
945
946 case BCO:
947 prim = rtsTrue;
948 size = bco_sizeW((StgBCO *)p);
949 break;
950
951 case MVAR:
952 case WEAK:
953 case STABLE_NAME:
954 case MUT_VAR_CLEAN:
955 case MUT_VAR_DIRTY:
956 prim = rtsTrue;
957 size = sizeW_fromITBL(info);
958 break;
959
960 case AP:
961 size = ap_sizeW((StgAP *)p);
962 break;
963
964 case PAP:
965 size = pap_sizeW((StgPAP *)p);
966 break;
967
968 case AP_STACK:
969 size = ap_stack_sizeW((StgAP_STACK *)p);
970 break;
971
972 case ARR_WORDS:
973 prim = rtsTrue;
974 size = arr_words_sizeW(stgCast(StgArrWords*,p));
975 break;
976
977 case MUT_ARR_PTRS_CLEAN:
978 case MUT_ARR_PTRS_DIRTY:
979 case MUT_ARR_PTRS_FROZEN:
980 case MUT_ARR_PTRS_FROZEN0:
981 prim = rtsTrue;
982 size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
983 break;
984
985 case TSO:
986 prim = rtsTrue;
987 #ifdef DEBUG_HEAP_PROF
988 size = tso_sizeW((StgTSO *)p);
989 break;
990 #else
991 if (RtsFlags.ProfFlags.includeTSOs) {
992 size = tso_sizeW((StgTSO *)p);
993 break;
994 } else {
995 // Skip this TSO and move on to the next object
996 p += tso_sizeW((StgTSO *)p);
997 continue;
998 }
999 #endif
1000
1001 case TREC_HEADER:
1002 prim = rtsTrue;
1003 size = sizeofW(StgTRecHeader);
1004 break;
1005
1006 case TVAR_WATCH_QUEUE:
1007 prim = rtsTrue;
1008 size = sizeofW(StgTVarWatchQueue);
1009 break;
1010
1011 case INVARIANT_CHECK_QUEUE:
1012 prim = rtsTrue;
1013 size = sizeofW(StgInvariantCheckQueue);
1014 break;
1015
1016 case ATOMIC_INVARIANT:
1017 prim = rtsTrue;
1018 size = sizeofW(StgAtomicInvariant);
1019 break;
1020
1021 case TVAR:
1022 prim = rtsTrue;
1023 size = sizeofW(StgTVar);
1024 break;
1025
1026 case TREC_CHUNK:
1027 prim = rtsTrue;
1028 size = sizeofW(StgTRecChunk);
1029 break;
1030
1031 default:
1032 barf("heapCensus, unknown object: %d", info->type);
1033 }
1034
1035 identity = NULL;
1036
1037 #ifdef DEBUG_HEAP_PROF
1038 real_size = size;
1039 #else
1040 // subtract the profiling overhead
1041 real_size = size - sizeofW(StgProfHeader);
1042 #endif
1043
1044 if (closureSatisfiesConstraints((StgClosure*)p)) {
1045 #ifdef PROFILING
1046 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
1047 if (prim)
1048 census->prim += real_size;
1049 else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
1050 census->not_used += real_size;
1051 else
1052 census->used += real_size;
1053 } else
1054 #endif
1055 {
1056 identity = closureIdentity((StgClosure *)p);
1057
1058 if (identity != NULL) {
1059 ctr = lookupHashTable( census->hash, (StgWord)identity );
1060 if (ctr != NULL) {
1061 #ifdef PROFILING
1062 if (RtsFlags.ProfFlags.bioSelector != NULL) {
1063 if (prim)
1064 ctr->c.ldv.prim += real_size;
1065 else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
1066 ctr->c.ldv.not_used += real_size;
1067 else
1068 ctr->c.ldv.used += real_size;
1069 } else
1070 #endif
1071 {
1072 ctr->c.resid += real_size;
1073 }
1074 } else {
1075 ctr = arenaAlloc( census->arena, sizeof(counter) );
1076 initLDVCtr(ctr);
1077 insertHashTable( census->hash, (StgWord)identity, ctr );
1078 ctr->identity = identity;
1079 ctr->next = census->ctrs;
1080 census->ctrs = ctr;
1081
1082 #ifdef PROFILING
1083 if (RtsFlags.ProfFlags.bioSelector != NULL) {
1084 if (prim)
1085 ctr->c.ldv.prim = real_size;
1086 else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
1087 ctr->c.ldv.not_used = real_size;
1088 else
1089 ctr->c.ldv.used = real_size;
1090 } else
1091 #endif
1092 {
1093 ctr->c.resid = real_size;
1094 }
1095 }
1096 }
1097 }
1098 }
1099
1100 p += size;
1101 }
1102 }
1103 }
1104
1105 void
1106 heapCensus( void )
1107 {
1108 nat g, s;
1109 Census *census;
1110
1111 census = &censuses[era];
1112 census->time = mut_user_time();
1113
1114 // calculate retainer sets if necessary
1115 #ifdef PROFILING
1116 if (doingRetainerProfiling()) {
1117 retainerProfile();
1118 }
1119 #endif
1120
1121 #ifdef PROFILING
1122 stat_startHeapCensus();
1123 #endif
1124
1125 // Traverse the heap, collecting the census info
1126
1127 // First the small_alloc_list: we have to fix the free pointer at
1128 // the end by calling tidyAllocateLists() first.
1129 tidyAllocateLists();
1130 heapCensusChain( census, small_alloc_list );
1131
1132 // Now traverse the heap in each generation/step.
1133 if (RtsFlags.GcFlags.generations == 1) {
1134 heapCensusChain( census, g0s0->blocks );
1135 } else {
1136 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
1137 for (s = 0; s < generations[g].n_steps; s++) {
1138 heapCensusChain( census, generations[g].steps[s].blocks );
1139 // Are we interested in large objects? might be
1140 // confusing to include the stack in a heap profile.
1141 heapCensusChain( census, generations[g].steps[s].large_objects );
1142 }
1143 }
1144 }
1145
1146 // dump out the census info
1147 #ifdef PROFILING
1148 // We can't generate any info for LDV profiling until
1149 // the end of the run...
1150 if (!doingLDVProfiling())
1151 dumpCensus( census );
1152 #else
1153 dumpCensus( census );
1154 #endif
1155
1156
1157 // free our storage, unless we're keeping all the census info for
1158 // future restriction by biography.
1159 #ifdef PROFILING
1160 if (RtsFlags.ProfFlags.bioSelector == NULL)
1161 #endif
1162 {
1163 freeHashTable( census->hash, NULL/* don't free the elements */ );
1164 arenaFree( census->arena );
1165 census->hash = NULL;
1166 census->arena = NULL;
1167 }
1168
1169 // we're into the next time period now
1170 nextEra();
1171
1172 #ifdef PROFILING
1173 stat_endHeapCensus();
1174 #endif
1175 }
1176
1177 #endif /* PROFILING || DEBUG_HEAP_PROF */
1178