/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2003
 *
 * Support for heap profiling
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Capability.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Profiling.h"
#include "ProfHeap.h"
#include "Stats.h"
#include "Hash.h"
#include "RetainerProfile.h"
#include "LdvProfile.h"
#include "Arena.h"
#include "Printer.h"
#include "Trace.h"
#include "sm/GCThread.h"

#include <string.h>

/* -----------------------------------------------------------------------------
 * era stores the current time period.  It is the same as the
 * number of censuses that have been performed.
 *
 * RESTRICTION:
 *   era must be no longer than LDV_SHIFT (15 or 30) bits.
 * Invariants:
 *   era is initialized to 1 in initHeapProfiling().
 *
 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
 * When era reaches max_era, the profiling stops because a closure can
 * store only up to (max_era - 1) as its creation or last use time.
 * -------------------------------------------------------------------------- */
unsigned int era;
static uint32_t max_era;
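
/* A sketch of how 'era' is packed into each closure's LDV word, inferred
 * from the LDV_* masks used in LDV_recordDead() below (the authoritative
 * definitions live in the LDV profiling header; treat this as
 * illustrative):
 *
 *     LDVW(c) = [ state bit | creation era | last-use era ]
 *
 * The creation era is read back as (LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT
 * and the last-use era as LDVW(c) & LDV_LAST_MASK, which is why era must
 * fit in LDV_SHIFT bits.
 */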

/* -----------------------------------------------------------------------------
 * Counters
 *
 * For most heap profiles each closure identity gets a simple count
 * of live words in the heap at each census.  However, if we're
 * selecting by biography, then we have to keep the various
 * lag/drag/void counters for each identity.
 * -------------------------------------------------------------------------- */
typedef struct _counter {
    const void *identity;
    union {
        ssize_t resid;
        struct {
            // Total sizes of:
            ssize_t prim;       // 'inherently used' closures
            ssize_t not_used;   // 'never used' closures
            ssize_t used;       // 'used at least once' closures
            ssize_t void_total; // 'destroyed without being used' closures
            ssize_t drag_total; // 'used at least once and waiting to die'
        } ldv;
    } c;
    struct _counter *next;
} counter;

STATIC_INLINE void
initLDVCtr( counter *ctr )
{
    ctr->c.ldv.prim = 0;
    ctr->c.ldv.not_used = 0;
    ctr->c.ldv.used = 0;
    ctr->c.ldv.void_total = 0;
    ctr->c.ldv.drag_total = 0;
}

typedef struct {
    double    time;     // the time in MUT time when the census is made
    HashTable *hash;
    counter   *ctrs;
    Arena     *arena;

    // for LDV profiling, when just displaying by LDV
    ssize_t prim;
    ssize_t not_used;
    ssize_t used;
    ssize_t void_total;
    ssize_t drag_total;
} Census;

static Census *censuses = NULL;
static uint32_t n_censuses = 0;

#ifdef PROFILING
static void aggregateCensusInfo( void );
#endif

static void dumpCensus( Census *census );

static bool closureSatisfiesConstraints( const StgClosure* p );

/* ----------------------------------------------------------------------------
 * Find the "closure identity", which is a unique pointer representing
 * the band to which this closure's heap space is attributed in the
 * heap profile.
 * ------------------------------------------------------------------------- */
static const void *
closureIdentity( const StgClosure *p )
{
    switch (RtsFlags.ProfFlags.doHeapProfile) {

#ifdef PROFILING
    case HEAP_BY_CCS:
        return p->header.prof.ccs;
    case HEAP_BY_MOD:
        return p->header.prof.ccs->cc->module;
    case HEAP_BY_DESCR:
        return GET_PROF_DESC(get_itbl(p));
    case HEAP_BY_TYPE:
        return GET_PROF_TYPE(get_itbl(p));
    case HEAP_BY_RETAINER:
        // AFAIK, the only closures in the heap which might not have a
        // valid retainer set are DEAD_WEAK closures.
        if (isRetainerSetFieldValid(p))
            return retainerSetOf(p);
        else
            return NULL;

#else
    case HEAP_BY_CLOSURE_TYPE:
    {
        const StgInfoTable *info;
        info = get_itbl(p);
        switch (info->type) {
        case CONSTR:
        case CONSTR_1_0:
        case CONSTR_0_1:
        case CONSTR_2_0:
        case CONSTR_1_1:
        case CONSTR_0_2:
        case CONSTR_NOCAF:
            return GET_CON_DESC(itbl_to_con_itbl(info));
        default:
            return closure_type_names[info->type];
        }
    }

#endif
    default:
        barf("closureIdentity");
    }
}

/* --------------------------------------------------------------------------
 * Profiling type predicates
 * ----------------------------------------------------------------------- */
#ifdef PROFILING
STATIC_INLINE bool
doingLDVProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
            || RtsFlags.ProfFlags.bioSelector != NULL);
}

bool
doingRetainerProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
            || RtsFlags.ProfFlags.retainerSelector != NULL);
}
#endif /* PROFILING */

// Processes a closure 'c' being destroyed whose size is 'size'.
// Make sure that LDV_recordDead() is not invoked on 'inherently used' closures
// such as TSO; they should not be involved in computing dragNew or voidNew.
//
// Even though era is checked in both LdvCensusForDead() and
// LdvCensusKillAll(), we still need to make sure that era is > 0 because
// LDV_recordDead() may be called from elsewhere in the runtime system.  E.g.,
// when a thunk is replaced by an indirection object.
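
// A worked example of the paired +/- accounting below (illustrative
// numbers): suppose a 10-word closure is last used in era 4 and dies in
// era 7.  It counts as "drag" for every census after its last use, so
// LDV_recordDead() does
//
//     censuses[5].drag_total += 10;   // t+1: first census where it drags
//     censuses[7].drag_total -= 10;   // the census in which it dies
//
// and once aggregateCensusInfo() propagates the running sums forwards,
// censuses[t].drag_total includes these 10 words exactly for t = 5..6.
// The void_total accounting for never-used closures works the same way.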

#ifdef PROFILING
void
LDV_recordDead( const StgClosure *c, uint32_t size )
{
    const void *id;
    uint32_t t;
    counter *ctr;

    if (era > 0 && closureSatisfiesConstraints(c)) {
        size -= sizeofW(StgProfHeader);
        ASSERT(LDVW(c) != 0);
        if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
            t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
            if (t < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t].void_total += size;
                    censuses[era].void_total -= size;
                    ASSERT(censuses[t].void_total < censuses[t].not_used);
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.void_total += size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.void_total -= size;
                }
            }
        } else {
            t = LDVW((c)) & LDV_LAST_MASK;
            if (t + 1 < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t+1].drag_total += size;
                    censuses[era].drag_total -= size;
                } else {
                    const void *id;
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.drag_total += size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.drag_total -= size;
                }
            }
        }
    }
}
#endif

/* --------------------------------------------------------------------------
 * Initialize censuses[era];
 * ----------------------------------------------------------------------- */

STATIC_INLINE void
initEra(Census *census)
{
    census->hash  = allocHashTable();
    census->ctrs  = NULL;
    census->arena = newArena();

    census->not_used   = 0;
    census->used       = 0;
    census->prim       = 0;
    census->void_total = 0;
    census->drag_total = 0;
}

STATIC_INLINE void
freeEra(Census *census)
{
    arenaFree(census->arena);
    freeHashTable(census->hash, NULL);
}

/* --------------------------------------------------------------------------
 * Increase era by 1 and initialize censuses[era].
 * Reallocate censuses[] and double its size if needed.
 * ----------------------------------------------------------------------- */

static void
nextEra( void )
{
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era++;

        if (era == max_era) {
            errorBelch("Maximum number of censuses reached.");
            if (rtsConfig.rts_opts_suggestions == true) {
                if (rtsConfig.rts_opts_enabled == RtsOptsAll) {
                    errorBelch("Use `+RTS -i' to reduce censuses.");
                } else {
                    errorBelch("Relink with -rtsopts and "
                               "use `+RTS -i' to reduce censuses.");
                }
            }
            stg_exit(EXIT_FAILURE);
        }

        if (era == n_censuses) {
            n_censuses *= 2;
            censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
                                       "nextEra");
        }
    }
#endif /* PROFILING */

    initEra( &censuses[era] );
}

/* ----------------------------------------------------------------------------
 * Heap profiling by info table
 * ------------------------------------------------------------------------- */

#if !defined(PROFILING)
FILE *hp_file;
static char *hp_filename;

void freeProfiling (void)
{
}

void initProfiling (void)
{
    char *prog;

    prog = stgMallocBytes(strlen(prog_name) + 1, "initProfiling2");
    strcpy(prog, prog_name);
#ifdef mingw32_HOST_OS
    // on Windows, drop the .exe suffix if there is one
    {
        char *suff;
        suff = strrchr(prog,'.');
        if (suff != NULL && !strcmp(suff,".exe")) {
            *suff = '\0';
        }
    }
#endif

    if (RtsFlags.ProfFlags.doHeapProfile) {
        /* Initialise the log file name */
        hp_filename = stgMallocBytes(strlen(prog) + 6, "hpFileName");
        sprintf(hp_filename, "%s.hp", prog);

        /* open the log file */
        if ((hp_file = fopen(hp_filename, "w")) == NULL) {
            debugBelch("Can't open profiling report file %s\n",
                       hp_filename);
            RtsFlags.ProfFlags.doHeapProfile = 0;
            stgFree(prog);
            return;
        }
    }

    stgFree(prog);

    initHeapProfiling();
}

void endProfiling( void )
{
    endHeapProfiling();
}
#endif /* !PROFILING */

static void
printSample(bool beginSample, StgDouble sampleValue)
{
    fprintf(hp_file, "%s %f\n",
            (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
            sampleValue);
    if (!beginSample) {
        fflush(hp_file);
    }
}
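
// For orientation, one census in the resulting .hp file looks roughly
// like this (illustrative identities and sizes; the identity lines are
// produced by dumpCensus() below, and hp2ps consumes this format):
//
//     BEGIN_SAMPLE 0.57
//     (184)go/main    8136
//     (231)parse/main 2048
//     END_SAMPLE 0.57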

static void
dumpCostCentresToEventLog(void)
{
#ifdef PROFILING
    CostCentre *cc, *next;
    for (cc = CC_LIST; cc != NULL; cc = next) {
        next = cc->link;
        traceHeapProfCostCentre(cc->ccID, cc->label, cc->module,
                                cc->srcloc, cc->is_caf);
    }
#endif
}

/* --------------------------------------------------------------------------
 * Initialize the heap profiler
 * ----------------------------------------------------------------------- */
uint32_t
initHeapProfiling(void)
{
    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return 0;
    }

#ifdef PROFILING
    if (doingLDVProfiling() && doingRetainerProfiling()) {
        errorBelch("cannot mix -hb and -hr");
        stg_exit(EXIT_FAILURE);
    }
#ifdef THREADED_RTS
    // See Trac #12019.
    if (doingLDVProfiling() && RtsFlags.ParFlags.nCapabilities > 1) {
        errorBelch("-hb cannot be used with multiple capabilities");
        stg_exit(EXIT_FAILURE);
    }
#endif
#endif

    // we only count eras if we're doing LDV profiling.  Otherwise era
    // is fixed at zero.
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era = 1;
    } else
#endif
    {
        era = 0;
    }

    // max_era = 2^LDV_SHIFT
    max_era = 1 << LDV_SHIFT;

    n_censuses = 32;
    censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");

    initEra( &censuses[era] );

    /* initProfilingLogFile(); */
    fprintf(hp_file, "JOB \"%s", prog_name);

#ifdef PROFILING
    {
        int count;
        for(count = 1; count < prog_argc; count++)
            fprintf(hp_file, " %s", prog_argv[count]);
        fprintf(hp_file, " +RTS");
        for(count = 0; count < rts_argc; count++)
            fprintf(hp_file, " %s", rts_argv[count]);
    }
#endif /* PROFILING */

    fprintf(hp_file, "\"\n" );

    fprintf(hp_file, "DATE \"%s\"\n", time_str());

    fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
    fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");

    printSample(true, 0);
    printSample(false, 0);

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        initRetainerProfiling();
    }
#endif

    traceHeapProfBegin(0);
    dumpCostCentresToEventLog();

    return 0;
}

void
endHeapProfiling(void)
{
    StgDouble seconds;

    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return;
    }

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        endRetainerProfiling();
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        uint32_t t;
        LdvCensusKillAll();
        aggregateCensusInfo();
        for (t = 1; t < era; t++) {
            dumpCensus( &censuses[t] );
        }
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        uint32_t t;
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            for (t = 1; t <= era; t++) {
                freeEra( &censuses[t] );
            }
        } else {
            freeEra( &censuses[era] );
        }
    } else {
        freeEra( &censuses[0] );
    }
#else
    freeEra( &censuses[0] );
#endif

    stgFree(censuses);

    seconds = mut_user_time();
    printSample(true, seconds);
    printSample(false, seconds);
    fclose(hp_file);
}


#ifdef PROFILING
static size_t
buf_append(char *p, const char *q, char *end)
{
    int m;

    for (m = 0; p < end; p++, q++, m++) {
        *p = *q;
        if (*q == '\0') { break; }
    }
    return m;
}

static void
fprint_ccs(FILE *fp, CostCentreStack *ccs, uint32_t max_length)
{
    char buf[max_length+1], *p, *buf_end;

    // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
    if (ccs == CCS_MAIN) {
        fprintf(fp, "MAIN");
        return;
    }

    fprintf(fp, "(%" FMT_Int ")", ccs->ccsID);

    p = buf;
    buf_end = buf + max_length + 1;

    // keep printing components of the stack until we run out of space
    // in the buffer.  If we run out of space, end with "...".
    for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {

        // CAF cost centres print as M.CAF, but we leave the module
        // name out of all the others to save space.
        if (!strcmp(ccs->cc->label,"CAF")) {
            p += buf_append(p, ccs->cc->module, buf_end);
            p += buf_append(p, ".CAF", buf_end);
        } else {
            p += buf_append(p, ccs->cc->label, buf_end);
            if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
                p += buf_append(p, "/", buf_end);
            }
        }

        if (p >= buf_end) {
            sprintf(buf+max_length-4, "...");
            break;
        }
    }
    fprintf(fp, "%s", buf);
}
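
// For example (illustrative: a stack with cost centre "go" pushed on
// "main", ccsID 42), fprint_ccs would emit
//
//     (42)go/main
//
// i.e. the innermost cost centre first, components separated by '/',
// with the module name shown only for CAF cost centres (as "M.CAF").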

bool
strMatchesSelector( const char* str, const char* sel )
{
    const char* p;
    // debugBelch("str_matches_selector %s %s\n", str, sel);
    while (1) {
        // Compare str against wherever we've got to in sel.
        p = str;
        while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
            p++; sel++;
        }
        // Match if all of str used and have reached the end of a sel fragment.
        if (*p == '\0' && (*sel == ',' || *sel == '\0'))
            return true;

        // No match.  Advance sel to the start of the next elem.
        while (*sel != ',' && *sel != '\0') sel++;
        if (*sel == ',') sel++;

        /* Run out of sel ?? */
        if (*sel == '\0') return false;
    }
}
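
// Illustrative cases: str must exactly match one whole comma-separated
// element of sel, so
//
//     strMatchesSelector("drag",  "lag,drag") => true
//     strMatchesSelector("lag",   "lag,drag") => true
//     strMatchesSelector("la",    "lag,drag") => false  (proper prefix)
//     strMatchesSelector("drags", "lag,drag") => false  (element exhausted)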

#endif /* PROFILING */

/* -----------------------------------------------------------------------------
 * Figure out whether a closure should be counted in this census, by
 * testing against all the specified constraints.
 * -------------------------------------------------------------------------- */
static bool
closureSatisfiesConstraints( const StgClosure* p )
{
#if !defined(PROFILING)
    (void)p;   /* keep gcc -Wall happy */
    return true;
#else
    bool b;

    // The CCS has a selected field to indicate whether this closure is
    // deselected by not being mentioned in the module, CC, or CCS
    // selectors.
    if (!p->header.prof.ccs->selected) {
        return false;
    }

    if (RtsFlags.ProfFlags.descrSelector) {
        b = strMatchesSelector( (GET_PROF_DESC(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.descrSelector );
        if (!b) return false;
    }
    if (RtsFlags.ProfFlags.typeSelector) {
        b = strMatchesSelector( (GET_PROF_TYPE(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.typeSelector );
        if (!b) return false;
    }
    if (RtsFlags.ProfFlags.retainerSelector) {
        RetainerSet *rs;
        uint32_t i;
        // We must check that the retainer set is valid here.  One
        // reason it might not be valid is if this closure is a
        // newly deceased weak pointer (i.e. a DEAD_WEAK), since
        // these aren't reached by the retainer profiler's traversal.
        if (isRetainerSetFieldValid((StgClosure *)p)) {
            rs = retainerSetOf((StgClosure *)p);
            if (rs != NULL) {
                for (i = 0; i < rs->num; i++) {
                    b = strMatchesSelector( rs->element[i]->cc->label,
                                            RtsFlags.ProfFlags.retainerSelector );
                    if (b) return true;
                }
            }
        }
        return false;
    }
    return true;
#endif /* PROFILING */
}

/* -----------------------------------------------------------------------------
 * Aggregate the heap census info for biographical profiling
 * -------------------------------------------------------------------------- */
#ifdef PROFILING
static void
aggregateCensusInfo( void )
{
    HashTable *acc;
    uint32_t t;
    counter *c, *d, *ctrs;
    Arena *arena;

    if (!doingLDVProfiling()) return;

    // Aggregate the LDV counters when displaying by biography.
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        long void_total, drag_total;

        // Now we compute void_total and drag_total for each census
        // After the program has finished, the void_total field of
        // each census contains the count of words that were *created*
        // in this era and were eventually void.  Conversely, if a
        // void closure was destroyed in this era, it will be
        // represented by a negative count of words in void_total.
        //
        // To get the count of live words that are void at each
        // census, just propagate the void_total count forwards:

        void_total = 0;
        drag_total = 0;
        for (t = 1; t < era; t++) { // note: start at 1, not 0
            void_total += censuses[t].void_total;
            drag_total += censuses[t].drag_total;
            censuses[t].void_total = void_total;
            censuses[t].drag_total = drag_total;

            ASSERT( censuses[t].void_total <= censuses[t].not_used );
            // should be true because: void_total is the count of
            // live words that are void at this census, which *must*
            // be less than the number of live words that have not
            // been used yet.

            ASSERT( censuses[t].drag_total <= censuses[t].used );
            // similar reasoning as above.
        }

        return;
    }

    // otherwise... we're doing a heap profile that is restricted to
    // some combination of lag, drag, void or use.  We've kept all the
    // census info for all censuses so far, but we still need to
    // aggregate the counters forwards.

    arena = newArena();
    acc = allocHashTable();
    ctrs = NULL;

    for (t = 1; t < era; t++) {

        // first look through all the counters we're aggregating
        for (c = ctrs; c != NULL; c = c->next) {
            // if one of the totals is non-zero, then this closure
            // type must be present in the heap at this census time...
            d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);

            if (d == NULL) {
                // if this closure identity isn't present in the
                // census for this time period, then our running
                // totals *must* be zero.
                ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);

                // debugCCS(c->identity);
                // debugBelch(" census=%d void_total=%d drag_total=%d\n",
                //            t, c->c.ldv.void_total, c->c.ldv.drag_total);
            } else {
                d->c.ldv.void_total += c->c.ldv.void_total;
                d->c.ldv.drag_total += c->c.ldv.drag_total;
                c->c.ldv.void_total = d->c.ldv.void_total;
                c->c.ldv.drag_total = d->c.ldv.drag_total;

                ASSERT( c->c.ldv.void_total >= 0 );
                ASSERT( c->c.ldv.drag_total >= 0 );
            }
        }

        // now look through the counters in this census to find new ones
        for (c = censuses[t].ctrs; c != NULL; c = c->next) {
            d = lookupHashTable(acc, (StgWord)c->identity);
            if (d == NULL) {
                d = arenaAlloc( arena, sizeof(counter) );
                initLDVCtr(d);
                insertHashTable( acc, (StgWord)c->identity, d );
                d->identity = c->identity;
                d->next = ctrs;
                ctrs = d;
                d->c.ldv.void_total = c->c.ldv.void_total;
                d->c.ldv.drag_total = c->c.ldv.drag_total;
            }
            ASSERT( c->c.ldv.void_total >= 0 );
            ASSERT( c->c.ldv.drag_total >= 0 );
        }
    }

    freeHashTable(acc, NULL);
    arenaFree(arena);
}
#endif

/* -----------------------------------------------------------------------------
 * Print out the results of a heap census.
 * -------------------------------------------------------------------------- */
static void
dumpCensus( Census *census )
{
    counter *ctr;
    ssize_t count;

    printSample(true, census->time);
    traceHeapProfSampleBegin(era);

#ifdef PROFILING
    /* change typecast to uint64_t to remove
     * print formatting warning.  See #12636 */
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        fprintf(hp_file, "VOID\t%" FMT_Word64 "\n",
                (uint64_t)(census->void_total * sizeof(W_)));
        fprintf(hp_file, "LAG\t%" FMT_Word64 "\n",
                (uint64_t)((census->not_used - census->void_total) *
                           sizeof(W_)));
        fprintf(hp_file, "USE\t%" FMT_Word64 "\n",
                (uint64_t)((census->used - census->drag_total) *
                           sizeof(W_)));
        fprintf(hp_file, "INHERENT_USE\t%" FMT_Word64 "\n",
                (uint64_t)(census->prim * sizeof(W_)));
        fprintf(hp_file, "DRAG\t%" FMT_Word64 "\n",
                (uint64_t)(census->drag_total * sizeof(W_)));
        printSample(false, census->time);
        return;
    }
#endif
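
    // Sanity note: the five bands printed above partition the live words
    // of this census, since VOID + LAG = not_used and USE + DRAG = used,
    // so VOID + LAG + USE + DRAG + INHERENT_USE = not_used + used + prim.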

    for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {

#ifdef PROFILING
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            count = 0;
            if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
            if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.drag_total;
            if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.void_total;
            if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
        } else
#endif
        {
            count = ctr->c.resid;
        }

        ASSERT( count >= 0 );

        if (count == 0) continue;

#if !defined(PROFILING)
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CLOSURE_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            traceHeapProfSampleString(0, (char *)ctr->identity,
                                      count * sizeof(W_));
            break;
        }
#endif

#ifdef PROFILING
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CCS:
            fprint_ccs(hp_file, (CostCentreStack *)ctr->identity,
                       RtsFlags.ProfFlags.ccsLength);
            traceHeapProfSampleCostCentre(0, (CostCentreStack *)ctr->identity,
                                          count * sizeof(W_));
            break;
        case HEAP_BY_MOD:
        case HEAP_BY_DESCR:
        case HEAP_BY_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            traceHeapProfSampleString(0, (char *)ctr->identity,
                                      count * sizeof(W_));
            break;
        case HEAP_BY_RETAINER:
        {
            RetainerSet *rs = (RetainerSet *)ctr->identity;

            // it might be the distinguished retainer set rs_MANY:
            if (rs == &rs_MANY) {
                fprintf(hp_file, "MANY");
                break;
            }

            // Mark this retainer set by negating its id, because it
            // has appeared in at least one census.  We print the
            // values of all such retainer sets into the log file at
            // the end.  A retainer set may exist but not feature in
            // any censuses if it arose as the intermediate retainer
            // set for some closure during retainer set calculation.
            if (rs->id > 0)
                rs->id = -(rs->id);

            // report in the unit of bytes: * sizeof(StgWord)
            printRetainerSetShort(hp_file, rs, RtsFlags.ProfFlags.ccsLength);
            break;
        }
        default:
            barf("dumpCensus; doHeapProfile");
        }
#endif

        fprintf(hp_file, "\t%" FMT_Word "\n", (W_)count * sizeof(W_));
    }

    printSample(false, census->time);
}


static void heapProfObject(Census *census, StgClosure *p, size_t size,
                           bool prim
#ifndef PROFILING
                           STG_UNUSED
#endif
                           )
{
    const void *identity;
    size_t real_size;
    counter *ctr;

    identity = NULL;

#ifdef PROFILING
    // subtract the profiling overhead
    real_size = size - sizeofW(StgProfHeader);
#else
    real_size = size;
#endif

    if (closureSatisfiesConstraints((StgClosure*)p)) {
#ifdef PROFILING
        if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
            if (prim)
                census->prim += real_size;
            else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                census->not_used += real_size;
            else
                census->used += real_size;
        } else
#endif
        {
            identity = closureIdentity((StgClosure *)p);

            if (identity != NULL) {
                ctr = lookupHashTable(census->hash, (StgWord)identity);
                if (ctr != NULL) {
#ifdef PROFILING
                    if (RtsFlags.ProfFlags.bioSelector != NULL) {
                        if (prim)
                            ctr->c.ldv.prim += real_size;
                        else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                            ctr->c.ldv.not_used += real_size;
                        else
                            ctr->c.ldv.used += real_size;
                    } else
#endif
                    {
                        ctr->c.resid += real_size;
                    }
                } else {
                    ctr = arenaAlloc( census->arena, sizeof(counter) );
                    initLDVCtr(ctr);
                    insertHashTable( census->hash, (StgWord)identity, ctr );
                    ctr->identity = identity;
                    ctr->next = census->ctrs;
                    census->ctrs = ctr;

#ifdef PROFILING
                    if (RtsFlags.ProfFlags.bioSelector != NULL) {
                        if (prim)
                            ctr->c.ldv.prim = real_size;
                        else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                            ctr->c.ldv.not_used = real_size;
                        else
                            ctr->c.ldv.used = real_size;
                    } else
#endif
                    {
                        ctr->c.resid = real_size;
                    }
                }
            }
        }
    }
}

// Compact objects require special handling code because they
// are not stored consecutively in memory (rather, each object
// is a list of objects), and that would break the while loop
// below.  But we know that each block holds at most one object
// so we don't need the loop.
//
// See Note [Compact Normal Forms] for details.
static void
heapCensusCompactList(Census *census, bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        StgCompactNFDataBlock *block = (StgCompactNFDataBlock*)bd->start;
        StgCompactNFData *str = block->owner;
        heapProfObject(census, (StgClosure*)str,
                       compact_nfdata_full_sizeW(str), true);
    }
}

/* -----------------------------------------------------------------------------
 * Code to perform a heap census.
 * -------------------------------------------------------------------------- */
static void
heapCensusChain( Census *census, bdescr *bd )
{
    StgPtr p;
    const StgInfoTable *info;
    size_t size;
    bool prim;

    for (; bd != NULL; bd = bd->link) {

        // HACK: pretend a pinned block is just one big ARR_WORDS
        // owned by CCS_PINNED.  These blocks can be full of holes due
        // to alignment constraints so we can't traverse the memory
        // and do a proper census.
        if (bd->flags & BF_PINNED) {
            StgClosure arr;
            SET_HDR(&arr, &stg_ARR_WORDS_info, CCS_PINNED);
            heapProfObject(census, &arr, bd->blocks * BLOCK_SIZE_W, true);
            continue;
        }

        p = bd->start;

        // When we shrink a large ARR_WORDS, we do not adjust the free pointer
        // of the associated block descriptor, thus introducing slop at the end
        // of the object.  This slop remains after GC, violating the assumption
        // of the loop below that all slop has been eliminated (#11627).
        // Consequently, we handle large ARR_WORDS objects as a special case.
        if (bd->flags & BF_LARGE
            && get_itbl((StgClosure *)p)->type == ARR_WORDS) {
            size = arr_words_sizeW((StgArrBytes *)p);
            prim = true;
            heapProfObject(census, (StgClosure *)p, size, prim);
            continue;
        }

        while (p < bd->free) {
            info = get_itbl((const StgClosure *)p);
            prim = false;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case FUN:
            case BLACKHOLE:
            case BLOCKING_QUEUE:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR:
            case CONSTR_NOCAF:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = true;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case TVAR:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = true;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
                size = ap_stack_sizeW((StgAP_STACK *)p);
                break;

            case ARR_WORDS:
                prim = true;
                size = arr_words_sizeW((StgArrBytes*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = true;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case SMALL_MUT_ARR_PTRS_CLEAN:
            case SMALL_MUT_ARR_PTRS_DIRTY:
            case SMALL_MUT_ARR_PTRS_FROZEN:
            case SMALL_MUT_ARR_PTRS_FROZEN0:
                prim = true;
                size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
                break;

            case TSO:
                prim = true;
#ifdef PROFILING
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = sizeofW(StgTSO);
                    break;
                } else {
                    // Skip this TSO and move on to the next object
                    p += sizeofW(StgTSO);
                    continue;
                }
#else
                size = sizeofW(StgTSO);
                break;
#endif

            case STACK:
                prim = true;
#ifdef PROFILING
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = stack_sizeW((StgStack*)p);
                    break;
                } else {
                    // Skip this STACK and move on to the next object
                    p += stack_sizeW((StgStack*)p);
                    continue;
                }
#else
                size = stack_sizeW((StgStack*)p);
                break;
#endif

            case TREC_CHUNK:
                prim = true;
                size = sizeofW(StgTRecChunk);
                break;

            case COMPACT_NFDATA:
                barf("heapCensus, found compact object in the wrong list");
                break;

            default:
                barf("heapCensus, unknown object: %d", info->type);
            }

            heapProfObject(census,(StgClosure*)p,size,prim);

            p += size;
        }
    }
}

void heapCensus (Time t)
{
    uint32_t g, n;
    Census *census;
    gen_workspace *ws;

    census = &censuses[era];
    census->time = mut_user_time_until(t);

    // calculate retainer sets if necessary
#ifdef PROFILING
    if (doingRetainerProfiling()) {
        retainerProfile();
    }
#endif

#ifdef PROFILING
    stat_startHeapCensus();
#endif

    // Traverse the heap, collecting the census info
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        heapCensusChain( census, generations[g].blocks );
        // Are we interested in large objects?  might be
        // confusing to include the stack in a heap profile.
        heapCensusChain( census, generations[g].large_objects );
        heapCensusCompactList( census, generations[g].compact_objects );

        for (n = 0; n < n_capabilities; n++) {
            ws = &gc_threads[n]->gens[g];
            heapCensusChain(census, ws->todo_bd);
            heapCensusChain(census, ws->part_list);
            heapCensusChain(census, ws->scavd_list);
        }
    }

    // dump out the census info
#ifdef PROFILING
    // We can't generate any info for LDV profiling until
    // the end of the run...
    if (!doingLDVProfiling())
        dumpCensus( census );
#else
    dumpCensus( census );
#endif

    // free our storage, unless we're keeping all the census info for
    // future restriction by biography.
#ifdef PROFILING
    if (RtsFlags.ProfFlags.bioSelector == NULL)
    {
        freeEra(census);
        census->hash = NULL;
        census->arena = NULL;
    }
#endif

    // we're into the next time period now
    nextEra();

#ifdef PROFILING
    stat_endHeapCensus();
#endif
}