1 /* ----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2003
4 *
5 * Support for heap profiling
6 *
7 * --------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11
12 #include "Capability.h"
13 #include "RtsFlags.h"
14 #include "RtsUtils.h"
15 #include "Profiling.h"
16 #include "ProfHeap.h"
17 #include "Stats.h"
18 #include "Hash.h"
19 #include "RetainerProfile.h"
20 #include "LdvProfile.h"
21 #include "Arena.h"
22 #include "Printer.h"
23 #include "sm/GCThread.h"
24
25 #include <string.h>
26
27 /* -----------------------------------------------------------------------------
28 * era stores the current time period. It is the same as the
29 * number of censuses that have been performed.
30 *
31 * RESTRICTION:
32 *    era must fit in LDV_SHIFT (15 or 30) bits.
33 * Invariants:
34 * era is initialized to 1 in initHeapProfiling().
35 *
36 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
37 * When era reaches max_era, the profiling stops because a closure can
38 * store only up to (max_era - 1) as its creation or last use time.
39 * -------------------------------------------------------------------------- */
40 unsigned int era;
41 static uint32_t max_era;
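/* A rough sketch of where these era values end up (inferred from the
 * decoding in LDV_recordDead() below, so treat the exact layout as
 * illustrative): each profiled closure carries an LDV word, LDVW(c),
 * which packs
 *
 *     creation era  = (LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT
 *     last-use era  =  LDVW(c) & LDV_LAST_MASK
 *     state flag    =  LDVW(c) & LDV_STATE_MASK   (CREATE vs USE)
 *
 * hence the requirement above that era fit in LDV_SHIFT bits.
 */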
42
43 /* -----------------------------------------------------------------------------
44 * Counters
45 *
46 * For most heap profiles each closure identity gets a simple count
47 * of live words in the heap at each census. However, if we're
48 * selecting by biography, then we have to keep the various
49 * lag/drag/void counters for each identity.
50 * -------------------------------------------------------------------------- */
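/* As a rough illustration of the biography categories below: a closure
 * that is created, used for the first time some eras later, used for
 * the last time later still, and finally destroyed is counted as LAG
 * from creation to first use, USE from first use to last use, and
 * DRAG from last use to destruction; a closure that dies without ever
 * being used is VOID for its whole lifetime, and 'inherently used'
 * closures (TSOs, ARR_WORDS, etc.) are counted separately as prim.
 */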
51 typedef struct _counter {
52 const void *identity;
53 union {
54 ssize_t resid;
55 struct {
56 // Total sizes of:
57 ssize_t prim; // 'inherently used' closures
58 ssize_t not_used; // 'never used' closures
59 ssize_t used; // 'used at least once' closures
60 ssize_t void_total; // 'destroyed without being used' closures
61 ssize_t drag_total; // 'used at least once and waiting to die'
62 } ldv;
63 } c;
64 struct _counter *next;
65 } counter;
66
67 STATIC_INLINE void
68 initLDVCtr( counter *ctr )
69 {
70 ctr->c.ldv.prim = 0;
71 ctr->c.ldv.not_used = 0;
72 ctr->c.ldv.used = 0;
73 ctr->c.ldv.void_total = 0;
74 ctr->c.ldv.drag_total = 0;
75 }
76
77 typedef struct {
78 double time; // the MUT (mutator) time at which the census was taken
79 HashTable * hash;
80 counter * ctrs;
81 Arena * arena;
82
83 // for LDV profiling, when just displaying by LDV
84 ssize_t prim;
85 ssize_t not_used;
86 ssize_t used;
87 ssize_t void_total;
88 ssize_t drag_total;
89 } Census;
90
91 static Census *censuses = NULL;
92 static uint32_t n_censuses = 0;
93
94 #ifdef PROFILING
95 static void aggregateCensusInfo( void );
96 #endif
97
98 static void dumpCensus( Census *census );
99
100 static rtsBool closureSatisfiesConstraints( const StgClosure* p );
101
102 /* ----------------------------------------------------------------------------
103 * Find the "closure identity", which is a unique pointer representing
104 * the band to which this closure's heap space is attributed in the
105 * heap profile.
106 * ------------------------------------------------------------------------- */
107 static const void *
108 closureIdentity( const StgClosure *p )
109 {
110 switch (RtsFlags.ProfFlags.doHeapProfile) {
111
112 #ifdef PROFILING
113 case HEAP_BY_CCS:
114 return p->header.prof.ccs;
115 case HEAP_BY_MOD:
116 return p->header.prof.ccs->cc->module;
117 case HEAP_BY_DESCR:
118 return GET_PROF_DESC(get_itbl(p));
119 case HEAP_BY_TYPE:
120 return GET_PROF_TYPE(get_itbl(p));
121 case HEAP_BY_RETAINER:
122 // AFAIK, the only closures in the heap which might not have a
123 // valid retainer set are DEAD_WEAK closures.
124 if (isRetainerSetFieldValid(p))
125 return retainerSetOf(p);
126 else
127 return NULL;
128
129 #else
130 case HEAP_BY_CLOSURE_TYPE:
131 {
132 const StgInfoTable *info;
133 info = get_itbl(p);
134 switch (info->type) {
135 case CONSTR:
136 case CONSTR_1_0:
137 case CONSTR_0_1:
138 case CONSTR_2_0:
139 case CONSTR_1_1:
140 case CONSTR_0_2:
141 case CONSTR_STATIC:
142 case CONSTR_NOCAF_STATIC:
143 return GET_CON_DESC(itbl_to_con_itbl(info));
144 default:
145 return closure_type_names[info->type];
146 }
147 }
148
149 #endif
150 default:
151 barf("closureIdentity");
152 }
153 }
154
155 /* --------------------------------------------------------------------------
156 * Profiling type predicates
157 * ----------------------------------------------------------------------- */
158 #ifdef PROFILING
159 STATIC_INLINE rtsBool
160 doingLDVProfiling( void )
161 {
162 return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
163 || RtsFlags.ProfFlags.bioSelector != NULL);
164 }
165
166 rtsBool
167 doingRetainerProfiling( void )
168 {
169 return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
170 || RtsFlags.ProfFlags.retainerSelector != NULL);
171 }
172 #endif /* PROFILING */
173
174 // Processes a closure 'c' being destroyed whose size is 'size'.
175 // Make sure that LDV_recordDead() is not invoked on 'inherently used' closures
176 // such as TSO; they should not be involved in computing dragNew or voidNew.
177 //
178 // Even though era is checked in both LdvCensusForDead() and
179 // LdvCensusKillAll(), we still need to make sure that era is > 0 because
180 // LDV_recordDead() may be called from elsewhere in the runtime system. E.g.,
181 // when a thunk is replaced by an indirection object.
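//
// As a rough sketch of the bookkeeping done below: when a closure of
// 'size' words created in era t dies without ever having been used,
// LDV_recordDead() adds 'size' to censuses[t].void_total and subtracts
// it from censuses[era].void_total; a closure whose last use was in
// era t instead adds 'size' to censuses[t+1].drag_total and subtracts
// it from censuses[era].drag_total. aggregateCensusInfo() later
// propagates these deltas forwards so that each census ends up holding
// the number of live words that were void or drag at that time.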
182
183 #ifdef PROFILING
184 void
185 LDV_recordDead( const StgClosure *c, uint32_t size )
186 {
187 const void *id;
188 uint32_t t;
189 counter *ctr;
190
191 if (era > 0 && closureSatisfiesConstraints(c)) {
192 size -= sizeofW(StgProfHeader);
193 ASSERT(LDVW(c) != 0);
194 if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
195 t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
196 if (t < era) {
197 if (RtsFlags.ProfFlags.bioSelector == NULL) {
198 censuses[t].void_total += size;
199 censuses[era].void_total -= size;
200 ASSERT(censuses[t].void_total < censuses[t].not_used);
201 } else {
202 id = closureIdentity(c);
203 ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
204 ASSERT( ctr != NULL );
205 ctr->c.ldv.void_total += size;
206 ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
207 if (ctr == NULL) {
208 ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
209 initLDVCtr(ctr);
210 insertHashTable(censuses[era].hash, (StgWord)id, ctr);
211 ctr->identity = id;
212 ctr->next = censuses[era].ctrs;
213 censuses[era].ctrs = ctr;
214 }
215 ctr->c.ldv.void_total -= size;
216 }
217 }
218 } else {
219 t = LDVW((c)) & LDV_LAST_MASK;
220 if (t + 1 < era) {
221 if (RtsFlags.ProfFlags.bioSelector == NULL) {
222 censuses[t+1].drag_total += size;
223 censuses[era].drag_total -= size;
224 } else {
225 const void *id;
226 id = closureIdentity(c);
227 ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
228 ASSERT( ctr != NULL );
229 ctr->c.ldv.drag_total += size;
230 ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
231 if (ctr == NULL) {
232 ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
233 initLDVCtr(ctr);
234 insertHashTable(censuses[era].hash, (StgWord)id, ctr);
235 ctr->identity = id;
236 ctr->next = censuses[era].ctrs;
237 censuses[era].ctrs = ctr;
238 }
239 ctr->c.ldv.drag_total -= size;
240 }
241 }
242 }
243 }
244 }
245 #endif
246
247 /* --------------------------------------------------------------------------
248 * Initialize censuses[era];
249 * ----------------------------------------------------------------------- */
250
251 STATIC_INLINE void
252 initEra(Census *census)
253 {
254 census->hash = allocHashTable();
255 census->ctrs = NULL;
256 census->arena = newArena();
257
258 census->not_used = 0;
259 census->used = 0;
260 census->prim = 0;
261 census->void_total = 0;
262 census->drag_total = 0;
263 }
264
265 STATIC_INLINE void
266 freeEra(Census *census)
267 {
268 arenaFree(census->arena);
269 freeHashTable(census->hash, NULL);
270 }
271
272 /* --------------------------------------------------------------------------
273 * Increases era by 1 and initializes censuses[era].
274 * Reallocates censuses[] and doubles its size if needed.
275 * ----------------------------------------------------------------------- */
276
277 static void
278 nextEra( void )
279 {
280 #ifdef PROFILING
281 if (doingLDVProfiling()) {
282 era++;
283
284 if (era == max_era) {
285 errorBelch("Maximum number of censuses reached.");
286 if (rtsConfig.rts_opts_suggestions == rtsTrue) {
287 if (rtsConfig.rts_opts_enabled == RtsOptsAll) {
288 errorBelch("Use `+RTS -i' to reduce censuses.");
289 } else {
290 errorBelch("Relink with -rtsopts and "
291 "use `+RTS -i' to reduce censuses.");
292 }
293 }
294 stg_exit(EXIT_FAILURE);
295 }
296
297 if (era == n_censuses) {
298 n_censuses *= 2;
299 censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
300 "nextEra");
301 }
302 }
303 #endif /* PROFILING */
304
305 initEra( &censuses[era] );
306 }
307
308 /* ----------------------------------------------------------------------------
309 * Heap profiling by info table
310 * ------------------------------------------------------------------------- */
311
312 #if !defined(PROFILING)
313 FILE *hp_file;
314 static char *hp_filename;
315
316 void freeProfiling (void)
317 {
318 }
319
320 void initProfiling (void)
321 {
322 char *prog;
323
324 prog = stgMallocBytes(strlen(prog_name) + 1, "initProfiling2");
325 strcpy(prog, prog_name);
326 #ifdef mingw32_HOST_OS
327 // on Windows, drop the .exe suffix if there is one
328 {
329 char *suff;
330 suff = strrchr(prog,'.');
331 if (suff != NULL && !strcmp(suff,".exe")) {
332 *suff = '\0';
333 }
334 }
335 #endif
336
337 if (RtsFlags.ProfFlags.doHeapProfile) {
338 /* Initialise the log file name */
339 hp_filename = stgMallocBytes(strlen(prog) + 6, "hpFileName");
340 sprintf(hp_filename, "%s.hp", prog);
341
342 /* open the log file */
343 if ((hp_file = fopen(hp_filename, "w")) == NULL) {
344 debugBelch("Can't open profiling report file %s\n",
345 hp_filename);
346 RtsFlags.ProfFlags.doHeapProfile = 0;
347 stgFree(prog);
348 return;
349 }
350 }
351
352 stgFree(prog);
353
354 initHeapProfiling();
355 }
356
357 void endProfiling( void )
358 {
359 endHeapProfiling();
360 }
361 #endif /* !PROFILING */
362
363 static void
364 printSample(rtsBool beginSample, StgDouble sampleValue)
365 {
366 fprintf(hp_file, "%s %f\n",
367 (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
368 sampleValue);
369 if (!beginSample) {
370 fflush(hp_file);
371 }
372 }
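/* For reference, hp_file holds the textual ".hp" heap profile consumed
 * by tools such as hp2ps. A run produces output roughly like the
 * following (band names and numbers are purely illustrative):
 *
 *   JOB "prog arg1 +RTS -hc"
 *   DATE "..."
 *   SAMPLE_UNIT "seconds"
 *   VALUE_UNIT "bytes"
 *   BEGIN_SAMPLE 0.000000
 *   END_SAMPLE 0.000000
 *   BEGIN_SAMPLE 0.500000
 *   (184)foo/main	16360
 *   MAIN	1640
 *   END_SAMPLE 0.500000
 *
 * The header is written by initHeapProfiling() and each sample block
 * by printSample()/dumpCensus() below.
 */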
373
374 /* --------------------------------------------------------------------------
375 * Initialize the heap profiler
376 * ----------------------------------------------------------------------- */
377 uint32_t
378 initHeapProfiling(void)
379 {
380 if (! RtsFlags.ProfFlags.doHeapProfile) {
381 return 0;
382 }
383
384 #ifdef PROFILING
385 if (doingLDVProfiling() && doingRetainerProfiling()) {
386 errorBelch("cannot mix -hb and -hr");
387 stg_exit(EXIT_FAILURE);
388 }
389 #endif
390
391 // we only count eras if we're doing LDV profiling. Otherwise era
392 // is fixed at zero.
393 #ifdef PROFILING
394 if (doingLDVProfiling()) {
395 era = 1;
396 } else
397 #endif
398 {
399 era = 0;
400 }
401
402 // max_era = 2^LDV_SHIFT
403 max_era = 1 << LDV_SHIFT;
404
405 n_censuses = 32;
406 censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");
407
408 initEra( &censuses[era] );
409
410 /* initProfilingLogFile(); */
411 fprintf(hp_file, "JOB \"%s", prog_name);
412
413 #ifdef PROFILING
414 {
415 int count;
416 for(count = 1; count < prog_argc; count++)
417 fprintf(hp_file, " %s", prog_argv[count]);
418 fprintf(hp_file, " +RTS");
419 for(count = 0; count < rts_argc; count++)
420 fprintf(hp_file, " %s", rts_argv[count]);
421 }
422 #endif /* PROFILING */
423
424 fprintf(hp_file, "\"\n" );
425
426 fprintf(hp_file, "DATE \"%s\"\n", time_str());
427
428 fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
429 fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");
430
431 printSample(rtsTrue, 0);
432 printSample(rtsFalse, 0);
433
434 #ifdef PROFILING
435 if (doingRetainerProfiling()) {
436 initRetainerProfiling();
437 }
438 #endif
439
440 return 0;
441 }
442
443 void
444 endHeapProfiling(void)
445 {
446 StgDouble seconds;
447
448 if (! RtsFlags.ProfFlags.doHeapProfile) {
449 return;
450 }
451
452 #ifdef PROFILING
453 if (doingRetainerProfiling()) {
454 endRetainerProfiling();
455 }
456 #endif
457
458 #ifdef PROFILING
459 if (doingLDVProfiling()) {
460 uint32_t t;
461 LdvCensusKillAll();
462 aggregateCensusInfo();
463 for (t = 1; t < era; t++) {
464 dumpCensus( &censuses[t] );
465 }
466 }
467 #endif
468
469 #ifdef PROFILING
470 if (doingLDVProfiling()) {
471 uint32_t t;
472 if (RtsFlags.ProfFlags.bioSelector != NULL) {
473 for (t = 1; t <= era; t++) {
474 freeEra( &censuses[t] );
475 }
476 } else {
477 freeEra( &censuses[era] );
478 }
479 } else {
480 freeEra( &censuses[0] );
481 }
482 #else
483 freeEra( &censuses[0] );
484 #endif
485
486 stgFree(censuses);
487
488 seconds = mut_user_time();
489 printSample(rtsTrue, seconds);
490 printSample(rtsFalse, seconds);
491 fclose(hp_file);
492 }
493
494
495
496 #ifdef PROFILING
497 static size_t
498 buf_append(char *p, const char *q, char *end)
499 {
500 int m;
501
502 for (m = 0; p < end; p++, q++, m++) {
503 *p = *q;
504 if (*q == '\0') { break; }
505 }
506 return m;
507 }
508
509 static void
510 fprint_ccs(FILE *fp, CostCentreStack *ccs, uint32_t max_length)
511 {
512 char buf[max_length+1], *p, *buf_end;
513
514 // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
515 if (ccs == CCS_MAIN) {
516 fprintf(fp, "MAIN");
517 return;
518 }
519
520 fprintf(fp, "(%" FMT_Int ")", ccs->ccsID);
521
522 p = buf;
523 buf_end = buf + max_length + 1;
524
525 // keep printing components of the stack until we run out of space
526 // in the buffer. If we run out of space, end with "...".
527 for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {
528
529 // CAF cost centres print as M.CAF, but we leave the module
530 // name out of all the others to save space.
531 if (!strcmp(ccs->cc->label,"CAF")) {
532 p += buf_append(p, ccs->cc->module, buf_end);
533 p += buf_append(p, ".CAF", buf_end);
534 } else {
535 p += buf_append(p, ccs->cc->label, buf_end);
536 if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
537 p += buf_append(p, "/", buf_end);
538 }
539 }
540
541 if (p >= buf_end) {
542 sprintf(buf+max_length-4, "...");
543 break;
544 }
545 }
546 fprintf(fp, "%s", buf);
547 }
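// For example (illustrative): a cost-centre stack with ccsID 42 whose
// top cost centre is "foo", pushed on top of "bar", pushed on top of
// MAIN, is printed as "(42)foo/bar"; CAF entries print as
// "Module.CAF", the bare CCS_MAIN stack prints as just "MAIN", and
// anything longer than max_length is truncated with "...".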
548
549 rtsBool
550 strMatchesSelector( const char* str, const char* sel )
551 {
552 const char* p;
553 // debugBelch("str_matches_selector %s %s\n", str, sel);
554 while (1) {
555 // Compare str against wherever we've got to in sel.
556 p = str;
557 while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
558 p++; sel++;
559 }
560 // Match if all of str used and have reached the end of a sel fragment.
561 if (*p == '\0' && (*sel == ',' || *sel == '\0'))
562 return rtsTrue;
563
564 // No match. Advance sel to the start of the next elem.
565 while (*sel != ',' && *sel != '\0') sel++;
566 if (*sel == ',') sel++;
567
568 /* Run out of sel ?? */
569 if (*sel == '\0') return rtsFalse;
570 }
571 }
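// For example, given the matching loop above:
//   strMatchesSelector("FUN",     "CONSTR,FUN,THUNK")  => rtsTrue
//   strMatchesSelector("FUN_1_0", "FUN")               => rtsFalse
// i.e. each comma-separated fragment of 'sel' must match the whole of
// 'str' exactly; there is no prefix or wildcard matching.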
572
573 #endif /* PROFILING */
574
575 /* -----------------------------------------------------------------------------
576 * Figure out whether a closure should be counted in this census, by
577 * testing against all the specified constraints.
578 * -------------------------------------------------------------------------- */
579 static rtsBool
580 closureSatisfiesConstraints( const StgClosure* p )
581 {
582 #if !defined(PROFILING)
583 (void)p; /* keep gcc -Wall happy */
584 return rtsTrue;
585 #else
586 rtsBool b;
587
588 // The CCS has a selected field to indicate whether this closure is
589 // deselected by not being mentioned in the module, CC, or CCS
590 // selectors.
591 if (!p->header.prof.ccs->selected) {
592 return rtsFalse;
593 }
594
595 if (RtsFlags.ProfFlags.descrSelector) {
596 b = strMatchesSelector( (GET_PROF_DESC(get_itbl((StgClosure *)p))),
597 RtsFlags.ProfFlags.descrSelector );
598 if (!b) return rtsFalse;
599 }
600 if (RtsFlags.ProfFlags.typeSelector) {
601 b = strMatchesSelector( (GET_PROF_TYPE(get_itbl((StgClosure *)p))),
602 RtsFlags.ProfFlags.typeSelector );
603 if (!b) return rtsFalse;
604 }
605 if (RtsFlags.ProfFlags.retainerSelector) {
606 RetainerSet *rs;
607 uint32_t i;
608 // We must check that the retainer set is valid here. One
609 // reason it might not be valid is if this closure is
610 // a newly deceased weak pointer (i.e. a DEAD_WEAK), since
611 // these aren't reached by the retainer profiler's traversal.
612 if (isRetainerSetFieldValid((StgClosure *)p)) {
613 rs = retainerSetOf((StgClosure *)p);
614 if (rs != NULL) {
615 for (i = 0; i < rs->num; i++) {
616 b = strMatchesSelector( rs->element[i]->cc->label,
617 RtsFlags.ProfFlags.retainerSelector );
618 if (b) return rtsTrue;
619 }
620 }
621 }
622 return rtsFalse;
623 }
624 return rtsTrue;
625 #endif /* PROFILING */
626 }
627
628 /* -----------------------------------------------------------------------------
629 * Aggregate the heap census info for biographical profiling
630 * -------------------------------------------------------------------------- */
631 #ifdef PROFILING
632 static void
633 aggregateCensusInfo( void )
634 {
635 HashTable *acc;
636 uint32_t t;
637 counter *c, *d, *ctrs;
638 Arena *arena;
639
640 if (!doingLDVProfiling()) return;
641
642 // Aggregate the LDV counters when displaying by biography.
643 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
644 long void_total, drag_total;
645
646 // Now we compute void_total and drag_total for each census
647 // After the program has finished, the void_total field of
648 // each census contains the count of words that were *created*
649 // in this era and were eventually void. Conversely, if a
650 // void closure was destroyed in this era, it will be
651 // represented by a negative count of words in void_total.
652 //
653 // To get the count of live words that are void at each
654 // census, just propagate the void_total count forwards:
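        //
        // For instance, 100 words created in era 2 that die void during
        // era 5 contribute +100 to censuses[2].void_total and -100 to
        // censuses[5].void_total (see LDV_recordDead()); the running sum
        // below then leaves those 100 words counted in censuses[2..4],
        // i.e. exactly the censuses at which the closure was live.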
655
656 void_total = 0;
657 drag_total = 0;
658 for (t = 1; t < era; t++) { // note: start at 1, not 0
659 void_total += censuses[t].void_total;
660 drag_total += censuses[t].drag_total;
661 censuses[t].void_total = void_total;
662 censuses[t].drag_total = drag_total;
663
664 ASSERT( censuses[t].void_total <= censuses[t].not_used );
665 // should be true because: void_total is the count of
666 // live words that are void at this census, which *must*
667 // be less than the number of live words that have not
668 // been used yet.
669
670 ASSERT( censuses[t].drag_total <= censuses[t].used );
671 // similar reasoning as above.
672 }
673
674 return;
675 }
676
677 // otherwise... we're doing a heap profile that is restricted to
678 // some combination of lag, drag, void or use. We've kept all the
679 // census info for all censuses so far, but we still need to
680 // aggregate the counters forwards.
681
682 arena = newArena();
683 acc = allocHashTable();
684 ctrs = NULL;
685
686 for (t = 1; t < era; t++) {
687
688 // first look through all the counters we're aggregating
689 for (c = ctrs; c != NULL; c = c->next) {
690 // if one of the totals is non-zero, then this closure
691 // type must be present in the heap at this census time...
692 d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);
693
694 if (d == NULL) {
695 // if this closure identity isn't present in the
696 // census for this time period, then our running
697 // totals *must* be zero.
698 ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);
699
700 // debugCCS(c->identity);
701 // debugBelch(" census=%d void_total=%d drag_total=%d\n",
702 // t, c->c.ldv.void_total, c->c.ldv.drag_total);
703 } else {
704 d->c.ldv.void_total += c->c.ldv.void_total;
705 d->c.ldv.drag_total += c->c.ldv.drag_total;
706 c->c.ldv.void_total = d->c.ldv.void_total;
707 c->c.ldv.drag_total = d->c.ldv.drag_total;
708
709 ASSERT( c->c.ldv.void_total >= 0 );
710 ASSERT( c->c.ldv.drag_total >= 0 );
711 }
712 }
713
714 // now look through the counters in this census to find new ones
715 for (c = censuses[t].ctrs; c != NULL; c = c->next) {
716 d = lookupHashTable(acc, (StgWord)c->identity);
717 if (d == NULL) {
718 d = arenaAlloc( arena, sizeof(counter) );
719 initLDVCtr(d);
720 insertHashTable( acc, (StgWord)c->identity, d );
721 d->identity = c->identity;
722 d->next = ctrs;
723 ctrs = d;
724 d->c.ldv.void_total = c->c.ldv.void_total;
725 d->c.ldv.drag_total = c->c.ldv.drag_total;
726 }
727 ASSERT( c->c.ldv.void_total >= 0 );
728 ASSERT( c->c.ldv.drag_total >= 0 );
729 }
730 }
731
732 freeHashTable(acc, NULL);
733 arenaFree(arena);
734 }
735 #endif
736
737 /* -----------------------------------------------------------------------------
738 * Print out the results of a heap census.
739 * -------------------------------------------------------------------------- */
740 static void
741 dumpCensus( Census *census )
742 {
743 counter *ctr;
744 ssize_t count;
745
746 printSample(rtsTrue, census->time);
747
748 #ifdef PROFILING
749 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
750 fprintf(hp_file, "VOID\t%lu\n", (unsigned long)(census->void_total) * sizeof(W_));
751 fprintf(hp_file, "LAG\t%lu\n",
752 (unsigned long)(census->not_used - census->void_total) * sizeof(W_));
753 fprintf(hp_file, "USE\t%lu\n",
754 (unsigned long)(census->used - census->drag_total) * sizeof(W_));
755 fprintf(hp_file, "INHERENT_USE\t%lu\n",
756 (unsigned long)(census->prim) * sizeof(W_));
757 fprintf(hp_file, "DRAG\t%lu\n",
758 (unsigned long)(census->drag_total) * sizeof(W_));
759 printSample(rtsFalse, census->time);
760 return;
761 }
762 #endif
763
764 for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {
765
766 #ifdef PROFILING
767 if (RtsFlags.ProfFlags.bioSelector != NULL) {
768 count = 0;
769 if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
770 count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
771 if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
772 count += ctr->c.ldv.drag_total;
773 if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
774 count += ctr->c.ldv.void_total;
775 if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
776 count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
777 } else
778 #endif
779 {
780 count = ctr->c.resid;
781 }
782
783 ASSERT( count >= 0 );
784
785 if (count == 0) continue;
786
787 #if !defined(PROFILING)
788 switch (RtsFlags.ProfFlags.doHeapProfile) {
789 case HEAP_BY_CLOSURE_TYPE:
790 fprintf(hp_file, "%s", (char *)ctr->identity);
791 break;
792 }
793 #endif
794
795 #ifdef PROFILING
796 switch (RtsFlags.ProfFlags.doHeapProfile) {
797 case HEAP_BY_CCS:
798 fprint_ccs(hp_file, (CostCentreStack *)ctr->identity, RtsFlags.ProfFlags.ccsLength);
799 break;
800 case HEAP_BY_MOD:
801 case HEAP_BY_DESCR:
802 case HEAP_BY_TYPE:
803 fprintf(hp_file, "%s", (char *)ctr->identity);
804 break;
805 case HEAP_BY_RETAINER:
806 {
807 RetainerSet *rs = (RetainerSet *)ctr->identity;
808
809 // it might be the distinguished retainer set rs_MANY:
810 if (rs == &rs_MANY) {
811 fprintf(hp_file, "MANY");
812 break;
813 }
814
815 // Mark this retainer set by negating its id, because it
816 // has appeared in at least one census. We print the
817 // values of all such retainer sets into the log file at
818 // the end. A retainer set may exist but not feature in
819 // any censuses if it arose as the intermediate retainer
820 // set for some closure during retainer set calculation.
821 if (rs->id > 0)
822 rs->id = -(rs->id);
823
824 // report in the unit of bytes: * sizeof(StgWord)
825 printRetainerSetShort(hp_file, rs, RtsFlags.ProfFlags.ccsLength);
826 break;
827 }
828 default:
829 barf("dumpCensus; doHeapProfile");
830 }
831 #endif
832
833 fprintf(hp_file, "\t%" FMT_Word "\n", (W_)count * sizeof(W_));
834 }
835
836 printSample(rtsFalse, census->time);
837 }
838
839
840 static void heapProfObject(Census *census, StgClosure *p, size_t size,
841 rtsBool prim
842 #ifndef PROFILING
843 STG_UNUSED
844 #endif
845 )
846 {
847 const void *identity;
848 size_t real_size;
849 counter *ctr;
850
851 identity = NULL;
852
853 #ifdef PROFILING
854 // subtract the profiling overhead
855 real_size = size - sizeofW(StgProfHeader);
856 #else
857 real_size = size;
858 #endif
859
860 if (closureSatisfiesConstraints((StgClosure*)p)) {
861 #ifdef PROFILING
862 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
863 if (prim)
864 census->prim += real_size;
865 else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
866 census->not_used += real_size;
867 else
868 census->used += real_size;
869 } else
870 #endif
871 {
872 identity = closureIdentity((StgClosure *)p);
873
874 if (identity != NULL) {
875 ctr = lookupHashTable(census->hash, (StgWord)identity);
876 if (ctr != NULL) {
877 #ifdef PROFILING
878 if (RtsFlags.ProfFlags.bioSelector != NULL) {
879 if (prim)
880 ctr->c.ldv.prim += real_size;
881 else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
882 ctr->c.ldv.not_used += real_size;
883 else
884 ctr->c.ldv.used += real_size;
885 } else
886 #endif
887 {
888 ctr->c.resid += real_size;
889 }
890 } else {
891 ctr = arenaAlloc( census->arena, sizeof(counter) );
892 initLDVCtr(ctr);
893 insertHashTable( census->hash, (StgWord)identity, ctr );
894 ctr->identity = identity;
895 ctr->next = census->ctrs;
896 census->ctrs = ctr;
897
898 #ifdef PROFILING
899 if (RtsFlags.ProfFlags.bioSelector != NULL) {
900 if (prim)
901 ctr->c.ldv.prim = real_size;
902 else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
903 ctr->c.ldv.not_used = real_size;
904 else
905 ctr->c.ldv.used = real_size;
906 } else
907 #endif
908 {
909 ctr->c.resid = real_size;
910 }
911 }
912 }
913 }
914 }
915 }
916
917 /* -----------------------------------------------------------------------------
918 * Code to perform a heap census.
919 * -------------------------------------------------------------------------- */
920 static void
921 heapCensusChain( Census *census, bdescr *bd )
922 {
923 StgPtr p;
924 const StgInfoTable *info;
925 size_t size;
926 rtsBool prim;
927
928 for (; bd != NULL; bd = bd->link) {
929
930 // HACK: pretend a pinned block is just one big ARR_WORDS
931 // owned by CCS_PINNED. These blocks can be full of holes due
932 // to alignment constraints so we can't traverse the memory
933 // and do a proper census.
934 if (bd->flags & BF_PINNED) {
935 StgClosure arr;
936 SET_HDR(&arr, &stg_ARR_WORDS_info, CCS_PINNED);
937 heapProfObject(census, &arr, bd->blocks * BLOCK_SIZE_W, rtsTrue);
938 continue;
939 }
940
941 p = bd->start;
942
943 // When we shrink a large ARR_WORDS, we do not adjust the free pointer
944 // of the associated block descriptor, thus introducing slop at the end
945 // of the object. This slop remains after GC, violating the assumption
946 // of the loop below that all slop has been eliminated (#11627).
947 // Consequently, we handle large ARR_WORDS objects as a special case.
948 if (bd->flags & BF_LARGE
949 && get_itbl((StgClosure *)p)->type == ARR_WORDS) {
950 size = arr_words_sizeW((StgArrBytes *)p);
951 prim = rtsTrue;
952 heapProfObject(census, (StgClosure *)p, size, prim);
953 continue;
954 }
955
956 while (p < bd->free) {
957 info = get_itbl((const StgClosure *)p);
958 prim = rtsFalse;
959
960 switch (info->type) {
961
962 case THUNK:
963 size = thunk_sizeW_fromITBL(info);
964 break;
965
966 case THUNK_1_1:
967 case THUNK_0_2:
968 case THUNK_2_0:
969 size = sizeofW(StgThunkHeader) + 2;
970 break;
971
972 case THUNK_1_0:
973 case THUNK_0_1:
974 case THUNK_SELECTOR:
975 size = sizeofW(StgThunkHeader) + 1;
976 break;
977
978 case CONSTR:
979 case FUN:
980 case BLACKHOLE:
981 case BLOCKING_QUEUE:
982 case FUN_1_0:
983 case FUN_0_1:
984 case FUN_1_1:
985 case FUN_0_2:
986 case FUN_2_0:
987 case CONSTR_1_0:
988 case CONSTR_0_1:
989 case CONSTR_1_1:
990 case CONSTR_0_2:
991 case CONSTR_2_0:
992 size = sizeW_fromITBL(info);
993 break;
994
995 case IND:
996 // Special case/Delicate Hack: INDs don't normally
997 // appear, since we're doing this heap census right
998 // after GC. However, GarbageCollect() also does
999 // resurrectThreads(), which can update some
1000 // blackholes when it calls raiseAsync() on the
1001 // resurrected threads. So we know that any IND will
1002 // be the size of a BLACKHOLE.
1003 size = BLACKHOLE_sizeW();
1004 break;
1005
1006 case BCO:
1007 prim = rtsTrue;
1008 size = bco_sizeW((StgBCO *)p);
1009 break;
1010
1011 case MVAR_CLEAN:
1012 case MVAR_DIRTY:
1013 case TVAR:
1014 case WEAK:
1015 case PRIM:
1016 case MUT_PRIM:
1017 case MUT_VAR_CLEAN:
1018 case MUT_VAR_DIRTY:
1019 prim = rtsTrue;
1020 size = sizeW_fromITBL(info);
1021 break;
1022
1023 case AP:
1024 size = ap_sizeW((StgAP *)p);
1025 break;
1026
1027 case PAP:
1028 size = pap_sizeW((StgPAP *)p);
1029 break;
1030
1031 case AP_STACK:
1032 size = ap_stack_sizeW((StgAP_STACK *)p);
1033 break;
1034
1035 case ARR_WORDS:
1036 prim = rtsTrue;
1037 size = arr_words_sizeW((StgArrBytes*)p);
1038 break;
1039
1040 case MUT_ARR_PTRS_CLEAN:
1041 case MUT_ARR_PTRS_DIRTY:
1042 case MUT_ARR_PTRS_FROZEN:
1043 case MUT_ARR_PTRS_FROZEN0:
1044 prim = rtsTrue;
1045 size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
1046 break;
1047
1048 case SMALL_MUT_ARR_PTRS_CLEAN:
1049 case SMALL_MUT_ARR_PTRS_DIRTY:
1050 case SMALL_MUT_ARR_PTRS_FROZEN:
1051 case SMALL_MUT_ARR_PTRS_FROZEN0:
1052 prim = rtsTrue;
1053 size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
1054 break;
1055
1056 case TSO:
1057 prim = rtsTrue;
1058 #ifdef PROFILING
1059 if (RtsFlags.ProfFlags.includeTSOs) {
1060 size = sizeofW(StgTSO);
1061 break;
1062 } else {
1063 // Skip this TSO and move on to the next object
1064 p += sizeofW(StgTSO);
1065 continue;
1066 }
1067 #else
1068 size = sizeofW(StgTSO);
1069 break;
1070 #endif
1071
1072 case STACK:
1073 prim = rtsTrue;
1074 #ifdef PROFILING
1075 if (RtsFlags.ProfFlags.includeTSOs) {
1076 size = stack_sizeW((StgStack*)p);
1077 break;
1078 } else {
1079 // Skip this STACK and move on to the next object
1080 p += stack_sizeW((StgStack*)p);
1081 continue;
1082 }
1083 #else
1084 size = stack_sizeW((StgStack*)p);
1085 break;
1086 #endif
1087
1088 case TREC_CHUNK:
1089 prim = rtsTrue;
1090 size = sizeofW(StgTRecChunk);
1091 break;
1092
1093 default:
1094 barf("heapCensus, unknown object: %d", info->type);
1095 }
1096
1097 heapProfObject(census,(StgClosure*)p,size,prim);
1098
1099 p += size;
1100 }
1101 }
1102 }
1103
1104 void heapCensus (Time t)
1105 {
1106 uint32_t g, n;
1107 Census *census;
1108 gen_workspace *ws;
1109
1110 census = &censuses[era];
1111 census->time = mut_user_time_until(t);
1112
1113 // calculate retainer sets if necessary
1114 #ifdef PROFILING
1115 if (doingRetainerProfiling()) {
1116 retainerProfile();
1117 }
1118 #endif
1119
1120 #ifdef PROFILING
1121 stat_startHeapCensus();
1122 #endif
1123
1124 // Traverse the heap, collecting the census info
1125 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
1126 heapCensusChain( census, generations[g].blocks );
1127 // Are we interested in large objects? It might be
1128 // confusing to include the stack in a heap profile.
1129 heapCensusChain( census, generations[g].large_objects );
1130
1131 for (n = 0; n < n_capabilities; n++) {
1132 ws = &gc_threads[n]->gens[g];
1133 heapCensusChain(census, ws->todo_bd);
1134 heapCensusChain(census, ws->part_list);
1135 heapCensusChain(census, ws->scavd_list);
1136 }
1137 }
1138
1139 // dump out the census info
1140 #ifdef PROFILING
1141 // We can't generate any info for LDV profiling until
1142 // the end of the run...
1143 if (!doingLDVProfiling())
1144 dumpCensus( census );
1145 #else
1146 dumpCensus( census );
1147 #endif
1148
1149
1150 // free our storage, unless we're keeping all the census info for
1151 // future restriction by biography.
1152 #ifdef PROFILING
1153 if (RtsFlags.ProfFlags.bioSelector == NULL)
1154 {
1155 freeEra(census);
1156 census->hash = NULL;
1157 census->arena = NULL;
1158 }
1159 #endif
1160
1161 // we're into the next time period now
1162 nextEra();
1163
1164 #ifdef PROFILING
1165 stat_endHeapCensus();
1166 #endif
1167 }