rts: Disable -hb with multiple capabilities
[ghc.git] / rts / ProfHeap.c
1 /* ----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2003
4 *
5 * Support for heap profiling
6 *
7 * --------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11
12 #include "Capability.h"
13 #include "RtsFlags.h"
14 #include "RtsUtils.h"
15 #include "Profiling.h"
16 #include "ProfHeap.h"
17 #include "Stats.h"
18 #include "Hash.h"
19 #include "RetainerProfile.h"
20 #include "LdvProfile.h"
21 #include "Arena.h"
22 #include "Printer.h"
23 #include "Trace.h"
24 #include "sm/GCThread.h"
25
26 #include <string.h>
27
28 /* -----------------------------------------------------------------------------
29 * era stores the current time period. It is the same as the
30 * number of censuses that have been performed.
31 *
32 * RESTRICTION:
33  *   era must fit in LDV_SHIFT (15 or 30) bits.
34 * Invariants:
35  *    era is initialized to 1 in initHeapProfiling(), or 0 when LDV profiling is off.
36 *
37 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
38 * When era reaches max_era, the profiling stops because a closure can
39 * store only up to (max_era - 1) as its creation or last use time.
40 * -------------------------------------------------------------------------- */
41 unsigned int era;
42 static uint32_t max_era;
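// Illustrative arithmetic (not part of the original source): with the
// 32-bit layout LDV_SHIFT is 15, so max_era = 1 << 15 = 32768 and at most
// max_era - 1 = 32767 censuses can be taken before profiling must stop;
// with LDV_SHIFT = 30 the limit is 2^30 - 1 censuses.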
43
44 /* -----------------------------------------------------------------------------
45 * Counters
46 *
47 * For most heap profiles each closure identity gets a simple count
48 * of live words in the heap at each census. However, if we're
49 * selecting by biography, then we have to keep the various
50 * lag/drag/void counters for each identity.
51 * -------------------------------------------------------------------------- */
52 typedef struct _counter {
53 const void *identity;
54 union {
55 ssize_t resid;
56 struct {
57 // Total sizes of:
58 ssize_t prim; // 'inherently used' closures
59 ssize_t not_used; // 'never used' closures
60 ssize_t used; // 'used at least once' closures
61 ssize_t void_total; // 'destroyed without being used' closures
62 ssize_t drag_total; // 'used at least once and waiting to die'
63 } ldv;
64 } c;
65 struct _counter *next;
66 } counter;
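// Descriptive note (not in the original source): the union above is read
// according to the profile mode.  Plain heap profiles keep a single
// residency count in c.resid, while biography-restricted profiles
// (RtsFlags.ProfFlags.bioSelector != NULL) use the c.ldv sub-counters;
// see heapProfObject() and dumpCensus() below for the two code paths.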
67
68 STATIC_INLINE void
69 initLDVCtr( counter *ctr )
70 {
71 ctr->c.ldv.prim = 0;
72 ctr->c.ldv.not_used = 0;
73 ctr->c.ldv.used = 0;
74 ctr->c.ldv.void_total = 0;
75 ctr->c.ldv.drag_total = 0;
76 }
77
78 typedef struct {
79 double time; // the time in MUT time when the census is made
80 HashTable * hash;
81 counter * ctrs;
82 Arena * arena;
83
84 // for LDV profiling, when just displaying by LDV
85 ssize_t prim;
86 ssize_t not_used;
87 ssize_t used;
88 ssize_t void_total;
89 ssize_t drag_total;
90 } Census;
91
92 static Census *censuses = NULL;
93 static uint32_t n_censuses = 0;
94
95 #ifdef PROFILING
96 static void aggregateCensusInfo( void );
97 #endif
98
99 static void dumpCensus( Census *census );
100
101 static rtsBool closureSatisfiesConstraints( const StgClosure* p );
102
103 /* ----------------------------------------------------------------------------
104 * Find the "closure identity", which is a unique pointer representing
105 * the band to which this closure's heap space is attributed in the
106 * heap profile.
107 * ------------------------------------------------------------------------- */
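// For example, under -hc (HEAP_BY_CCS) the identity is the closure's
// CostCentreStack pointer, so two closures land in the same band exactly
// when they were allocated under the same cost-centre stack; under -hT
// (HEAP_BY_CLOSURE_TYPE) the identity is a closure-type name string.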
108 static const void *
109 closureIdentity( const StgClosure *p )
110 {
111 switch (RtsFlags.ProfFlags.doHeapProfile) {
112
113 #ifdef PROFILING
114 case HEAP_BY_CCS:
115 return p->header.prof.ccs;
116 case HEAP_BY_MOD:
117 return p->header.prof.ccs->cc->module;
118 case HEAP_BY_DESCR:
119 return GET_PROF_DESC(get_itbl(p));
120 case HEAP_BY_TYPE:
121 return GET_PROF_TYPE(get_itbl(p));
122 case HEAP_BY_RETAINER:
123 // AFAIK, the only closures in the heap which might not have a
124 // valid retainer set are DEAD_WEAK closures.
125 if (isRetainerSetFieldValid(p))
126 return retainerSetOf(p);
127 else
128 return NULL;
129
130 #else
131 case HEAP_BY_CLOSURE_TYPE:
132 {
133 const StgInfoTable *info;
134 info = get_itbl(p);
135 switch (info->type) {
136 case CONSTR:
137 case CONSTR_1_0:
138 case CONSTR_0_1:
139 case CONSTR_2_0:
140 case CONSTR_1_1:
141 case CONSTR_0_2:
142 case CONSTR_STATIC:
143 case CONSTR_NOCAF_STATIC:
144 return GET_CON_DESC(itbl_to_con_itbl(info));
145 default:
146 return closure_type_names[info->type];
147 }
148 }
149
150 #endif
151 default:
152 barf("closureIdentity");
153 }
154 }
155
156 /* --------------------------------------------------------------------------
157 * Profiling type predicates
158 * ----------------------------------------------------------------------- */
159 #ifdef PROFILING
160 STATIC_INLINE rtsBool
161 doingLDVProfiling( void )
162 {
163 return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
164 || RtsFlags.ProfFlags.bioSelector != NULL);
165 }
166
167 rtsBool
168 doingRetainerProfiling( void )
169 {
170 return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
171 || RtsFlags.ProfFlags.retainerSelector != NULL);
172 }
173 #endif /* PROFILING */
174
175 // Processes a closure 'c' being destroyed whose size is 'size'.
176 // Make sure that LDV_recordDead() is not invoked on 'inherently used' closures
177 // such as TSO; they should not be involved in computing dragNew or voidNew.
178 //
179 // Even though era is checked in both LdvCensusForDead() and
180 // LdvCensusKillAll(), we still need to make sure that era is > 0 because
181 // LDV_recordDead() may be called from elsewhere in the runtime system. E.g.,
182 // when a thunk is replaced by an indirection object.
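//
// Sketch of how the LDV word is decoded by the code below (the masks are
// defined in the LDV profiling headers): the era in which the closure was
// created is (LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT, whether it has been
// used yet is LDVW(c) & LDV_STATE_MASK (compared against LDV_STATE_CREATE),
// and the era of its last use is LDVW(c) & LDV_LAST_MASK.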
183
184 #ifdef PROFILING
185 void
186 LDV_recordDead( const StgClosure *c, uint32_t size )
187 {
188 const void *id;
189 uint32_t t;
190 counter *ctr;
191
192 if (era > 0 && closureSatisfiesConstraints(c)) {
193 size -= sizeofW(StgProfHeader);
194 ASSERT(LDVW(c) != 0);
195 if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
196 t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
197 if (t < era) {
198 if (RtsFlags.ProfFlags.bioSelector == NULL) {
199 censuses[t].void_total += size;
200 censuses[era].void_total -= size;
201 ASSERT(censuses[t].void_total < censuses[t].not_used);
202 } else {
203 id = closureIdentity(c);
204 ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
205 ASSERT( ctr != NULL );
206 ctr->c.ldv.void_total += size;
207 ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
208 if (ctr == NULL) {
209 ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
210 initLDVCtr(ctr);
211 insertHashTable(censuses[era].hash, (StgWord)id, ctr);
212 ctr->identity = id;
213 ctr->next = censuses[era].ctrs;
214 censuses[era].ctrs = ctr;
215 }
216 ctr->c.ldv.void_total -= size;
217 }
218 }
219 } else {
220 t = LDVW((c)) & LDV_LAST_MASK;
221 if (t + 1 < era) {
222 if (RtsFlags.ProfFlags.bioSelector == NULL) {
223 censuses[t+1].drag_total += size;
224 censuses[era].drag_total -= size;
225 } else {
226 const void *id;
227 id = closureIdentity(c);
228 ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
229 ASSERT( ctr != NULL );
230 ctr->c.ldv.drag_total += size;
231 ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
232 if (ctr == NULL) {
233 ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
234 initLDVCtr(ctr);
235 insertHashTable(censuses[era].hash, (StgWord)id, ctr);
236 ctr->identity = id;
237 ctr->next = censuses[era].ctrs;
238 censuses[era].ctrs = ctr;
239 }
240 ctr->c.ldv.drag_total -= size;
241 }
242 }
243 }
244 }
245 }
246 #endif
247
248 /* --------------------------------------------------------------------------
249  * Initialize censuses[era].
250 * ----------------------------------------------------------------------- */
251
252 STATIC_INLINE void
253 initEra(Census *census)
254 {
255 census->hash = allocHashTable();
256 census->ctrs = NULL;
257 census->arena = newArena();
258
259 census->not_used = 0;
260 census->used = 0;
261 census->prim = 0;
262 census->void_total = 0;
263 census->drag_total = 0;
264 }
265
266 STATIC_INLINE void
267 freeEra(Census *census)
268 {
269 arenaFree(census->arena);
270 freeHashTable(census->hash, NULL);
271 }
272
273 /* --------------------------------------------------------------------------
274  * Increases era by 1 and initializes censuses[era].
275  * Reallocates censuses[] and doubles its size if needed.
276 * ----------------------------------------------------------------------- */
277
278 static void
279 nextEra( void )
280 {
281 #ifdef PROFILING
282 if (doingLDVProfiling()) {
283 era++;
284
285 if (era == max_era) {
286 errorBelch("Maximum number of censuses reached.");
287 if (rtsConfig.rts_opts_suggestions == rtsTrue) {
288 if (rtsConfig.rts_opts_enabled == RtsOptsAll) {
289 errorBelch("Use `+RTS -i' to reduce censuses.");
290 } else {
291 errorBelch("Relink with -rtsopts and "
292 "use `+RTS -i' to reduce censuses.");
293 }
294 }
295 stg_exit(EXIT_FAILURE);
296 }
297
298 if (era == n_censuses) {
299 n_censuses *= 2;
300 censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
301 "nextEra");
302 }
303 }
304 #endif /* PROFILING */
305
306 initEra( &censuses[era] );
307 }
308
309 /* ----------------------------------------------------------------------------
310 * Heap profiling by info table
311 * ------------------------------------------------------------------------- */
312
313 #if !defined(PROFILING)
314 FILE *hp_file;
315 static char *hp_filename;
316
317 void freeProfiling (void)
318 {
319 }
320
321 void initProfiling (void)
322 {
323 char *prog;
324
325 prog = stgMallocBytes(strlen(prog_name) + 1, "initProfiling2");
326 strcpy(prog, prog_name);
327 #ifdef mingw32_HOST_OS
328 // on Windows, drop the .exe suffix if there is one
329 {
330 char *suff;
331 suff = strrchr(prog,'.');
332 if (suff != NULL && !strcmp(suff,".exe")) {
333 *suff = '\0';
334 }
335 }
336 #endif
337
338 if (RtsFlags.ProfFlags.doHeapProfile) {
339 /* Initialise the log file name */
340 hp_filename = stgMallocBytes(strlen(prog) + 6, "hpFileName");
341 sprintf(hp_filename, "%s.hp", prog);
342
343 /* open the log file */
344 if ((hp_file = fopen(hp_filename, "w")) == NULL) {
345 debugBelch("Can't open profiling report file %s\n",
346 hp_filename);
347 RtsFlags.ProfFlags.doHeapProfile = 0;
348 stgFree(prog);
349 return;
350 }
351 }
352
353 stgFree(prog);
354
355 initHeapProfiling();
356 }
357
358 void endProfiling( void )
359 {
360 endHeapProfiling();
361 }
362 #endif /* !PROFILING */
363
364 static void
365 printSample(rtsBool beginSample, StgDouble sampleValue)
366 {
367 fprintf(hp_file, "%s %f\n",
368 (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
369 sampleValue);
370 if (!beginSample) {
371 fflush(hp_file);
372 }
373 }
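// Together with the header written by initHeapProfiling() and the per-band
// lines written by dumpCensus(), the .hp output therefore looks roughly
// like this (band names and sizes are invented for illustration):
//
//   JOB "prog arg +RTS -hc"
//   DATE "..."
//   SAMPLE_UNIT "seconds"
//   VALUE_UNIT "bytes"
//   BEGIN_SAMPLE 0.10
//   (184)Main.go/Main.main	4096
//   MAIN	2048
//   END_SAMPLE 0.10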
374
375 static void
376 dumpCostCentresToEventLog(void)
377 {
378 #ifdef PROFILING
379 CostCentre *cc, *next;
380 for (cc = CC_LIST; cc != NULL; cc = next) {
381 next = cc->link;
382 traceHeapProfCostCentre(cc->ccID, cc->label, cc->module,
383 cc->srcloc, cc->is_caf);
384 }
385 #endif
386 }
387
388 /* --------------------------------------------------------------------------
389  * Initialize the heap profiler
390 * ----------------------------------------------------------------------- */
391 uint32_t
392 initHeapProfiling(void)
393 {
394 if (! RtsFlags.ProfFlags.doHeapProfile) {
395 return 0;
396 }
397
398 #ifdef PROFILING
399 if (doingLDVProfiling() && doingRetainerProfiling()) {
400 errorBelch("cannot mix -hb and -hr");
401 stg_exit(EXIT_FAILURE);
402 }
403 #ifdef THREADED_RTS
404 // See Trac #12019.
405 if (doingLDVProfiling() && RtsFlags.ParFlags.nCapabilities > 1) {
406 errorBelch("-hb cannot be used with multiple capabilities");
407 stg_exit(EXIT_FAILURE);
408 }
409 #endif
410 #endif
411
412 // we only count eras if we're doing LDV profiling. Otherwise era
413 // is fixed at zero.
414 #ifdef PROFILING
415 if (doingLDVProfiling()) {
416 era = 1;
417 } else
418 #endif
419 {
420 era = 0;
421 }
422
423 // max_era = 2^LDV_SHIFT
424 max_era = 1 << LDV_SHIFT;
425
426 n_censuses = 32;
427 censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");
428
429 initEra( &censuses[era] );
430
431 /* initProfilingLogFile(); */
432 fprintf(hp_file, "JOB \"%s", prog_name);
433
434 #ifdef PROFILING
435 {
436 int count;
437 for(count = 1; count < prog_argc; count++)
438 fprintf(hp_file, " %s", prog_argv[count]);
439 fprintf(hp_file, " +RTS");
440 for(count = 0; count < rts_argc; count++)
441 fprintf(hp_file, " %s", rts_argv[count]);
442 }
443 #endif /* PROFILING */
444
445 fprintf(hp_file, "\"\n" );
446
447 fprintf(hp_file, "DATE \"%s\"\n", time_str());
448
449 fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
450 fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");
451
452 printSample(rtsTrue, 0);
453 printSample(rtsFalse, 0);
454
455 #ifdef PROFILING
456 if (doingRetainerProfiling()) {
457 initRetainerProfiling();
458 }
459 #endif
460
461 traceHeapProfBegin(0);
462 dumpCostCentresToEventLog();
463
464 return 0;
465 }
466
467 void
468 endHeapProfiling(void)
469 {
470 StgDouble seconds;
471
472 if (! RtsFlags.ProfFlags.doHeapProfile) {
473 return;
474 }
475
476 #ifdef PROFILING
477 if (doingRetainerProfiling()) {
478 endRetainerProfiling();
479 }
480 #endif
481
482 #ifdef PROFILING
483 if (doingLDVProfiling()) {
484 uint32_t t;
485 LdvCensusKillAll();
486 aggregateCensusInfo();
487 for (t = 1; t < era; t++) {
488 dumpCensus( &censuses[t] );
489 }
490 }
491 #endif
492
493 #ifdef PROFILING
494 if (doingLDVProfiling()) {
495 uint32_t t;
496 if (RtsFlags.ProfFlags.bioSelector != NULL) {
497 for (t = 1; t <= era; t++) {
498 freeEra( &censuses[t] );
499 }
500 } else {
501 freeEra( &censuses[era] );
502 }
503 } else {
504 freeEra( &censuses[0] );
505 }
506 #else
507 freeEra( &censuses[0] );
508 #endif
509
510 stgFree(censuses);
511
512 seconds = mut_user_time();
513 printSample(rtsTrue, seconds);
514 printSample(rtsFalse, seconds);
515 fclose(hp_file);
516 }
517
518
519
520 #ifdef PROFILING
521 static size_t
522 buf_append(char *p, const char *q, char *end)
523 {
524 int m;
525
526 for (m = 0; p < end; p++, q++, m++) {
527 *p = *q;
528 if (*q == '\0') { break; }
529 }
530 return m;
531 }
532
533 static void
534 fprint_ccs(FILE *fp, CostCentreStack *ccs, uint32_t max_length)
535 {
536 char buf[max_length+1], *p, *buf_end;
537
538 // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
539 if (ccs == CCS_MAIN) {
540 fprintf(fp, "MAIN");
541 return;
542 }
543
544 fprintf(fp, "(%" FMT_Int ")", ccs->ccsID);
545
546 p = buf;
547 buf_end = buf + max_length + 1;
548
549 // keep printing components of the stack until we run out of space
550 // in the buffer. If we run out of space, end with "...".
551 for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {
552
553 // CAF cost centres print as M.CAF, but we leave the module
554 // name out of all the others to save space.
555 if (!strcmp(ccs->cc->label,"CAF")) {
556 p += buf_append(p, ccs->cc->module, buf_end);
557 p += buf_append(p, ".CAF", buf_end);
558 } else {
559 p += buf_append(p, ccs->cc->label, buf_end);
560 if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
561 p += buf_append(p, "/", buf_end);
562 }
563 }
564
565 if (p >= buf_end) {
566 sprintf(buf+max_length-4, "...");
567 break;
568 }
569 }
570 fprintf(fp, "%s", buf);
571 }
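// Illustration (not from the original source): for a stack go -> main ->
// MAIN whose ccsID is, say, 231, this prints "(231)go/main"; a CAF stack
// prints its module name, e.g. "(42)Main.CAF".  If the rendering exceeds
// max_length characters it is truncated to end in "...".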
572
573 rtsBool
574 strMatchesSelector( const char* str, const char* sel )
575 {
576 const char* p;
577 // debugBelch("str_matches_selector %s %s\n", str, sel);
578 while (1) {
579 // Compare str against wherever we've got to in sel.
580 p = str;
581 while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
582 p++; sel++;
583 }
584 // Match if all of str used and have reached the end of a sel fragment.
585 if (*p == '\0' && (*sel == ',' || *sel == '\0'))
586 return rtsTrue;
587
588 // No match. Advance sel to the start of the next elem.
589 while (*sel != ',' && *sel != '\0') sel++;
590 if (*sel == ',') sel++;
591
592         /* Run out of sel? */
593 if (*sel == '\0') return rtsFalse;
594 }
595 }
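// Example behaviour (illustrative): with sel = "Main,GHC.Base",
//   strMatchesSelector("Main", sel)     returns rtsTrue
//   strMatchesSelector("GHC.Base", sel) returns rtsTrue
//   strMatchesSelector("Main.go", sel)  returns rtsFalse
// because each comma-separated fragment must match the whole of str.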
596
597 #endif /* PROFILING */
598
599 /* -----------------------------------------------------------------------------
600 * Figure out whether a closure should be counted in this census, by
601 * testing against all the specified constraints.
602 * -------------------------------------------------------------------------- */
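// For instance (illustrative): a run that profiles by cost-centre stack
// but supplies a type selector such as "Int,Double" in
// RtsFlags.ProfFlags.typeSelector will only count closures whose type
// description matches "Int" or "Double"; everything else is skipped by
// the census.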
603 static rtsBool
604 closureSatisfiesConstraints( const StgClosure* p )
605 {
606 #if !defined(PROFILING)
607 (void)p; /* keep gcc -Wall happy */
608 return rtsTrue;
609 #else
610 rtsBool b;
611
612 // The CCS has a selected field to indicate whether this closure is
613 // deselected by not being mentioned in the module, CC, or CCS
614 // selectors.
615 if (!p->header.prof.ccs->selected) {
616 return rtsFalse;
617 }
618
619 if (RtsFlags.ProfFlags.descrSelector) {
620 b = strMatchesSelector( (GET_PROF_DESC(get_itbl((StgClosure *)p))),
621 RtsFlags.ProfFlags.descrSelector );
622 if (!b) return rtsFalse;
623 }
624 if (RtsFlags.ProfFlags.typeSelector) {
625 b = strMatchesSelector( (GET_PROF_TYPE(get_itbl((StgClosure *)p))),
626 RtsFlags.ProfFlags.typeSelector );
627 if (!b) return rtsFalse;
628 }
629 if (RtsFlags.ProfFlags.retainerSelector) {
630 RetainerSet *rs;
631 uint32_t i;
632 // We must check that the retainer set is valid here. One
633 // reason it might not be valid is if this closure is a
634         // newly deceased weak pointer (i.e. a DEAD_WEAK), since
635 // these aren't reached by the retainer profiler's traversal.
636 if (isRetainerSetFieldValid((StgClosure *)p)) {
637 rs = retainerSetOf((StgClosure *)p);
638 if (rs != NULL) {
639 for (i = 0; i < rs->num; i++) {
640 b = strMatchesSelector( rs->element[i]->cc->label,
641 RtsFlags.ProfFlags.retainerSelector );
642 if (b) return rtsTrue;
643 }
644 }
645 }
646 return rtsFalse;
647 }
648 return rtsTrue;
649 #endif /* PROFILING */
650 }
651
652 /* -----------------------------------------------------------------------------
653 * Aggregate the heap census info for biographical profiling
654 * -------------------------------------------------------------------------- */
655 #ifdef PROFILING
656 static void
657 aggregateCensusInfo( void )
658 {
659 HashTable *acc;
660 uint32_t t;
661 counter *c, *d, *ctrs;
662 Arena *arena;
663
664 if (!doingLDVProfiling()) return;
665
666 // Aggregate the LDV counters when displaying by biography.
667 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
668 long void_total, drag_total;
669
670 // Now we compute void_total and drag_total for each census
671 // After the program has finished, the void_total field of
672 // each census contains the count of words that were *created*
673 // in this era and were eventually void. Conversely, if a
674 // void closure was destroyed in this era, it will be
675 // represented by a negative count of words in void_total.
676 //
677 // To get the count of live words that are void at each
678 // census, just propagate the void_total count forwards:
679
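//
// (Worked example, for illustration: if 100 words created in era 3 die
// unused while era 7 is current, LDV_recordDead() adds 100 to
// censuses[3].void_total and subtracts 100 from censuses[7].void_total;
// after the prefix sum below, censuses 3 through 6 each report those
// 100 words as void.)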
680 void_total = 0;
681 drag_total = 0;
682 for (t = 1; t < era; t++) { // note: start at 1, not 0
683 void_total += censuses[t].void_total;
684 drag_total += censuses[t].drag_total;
685 censuses[t].void_total = void_total;
686 censuses[t].drag_total = drag_total;
687
688 ASSERT( censuses[t].void_total <= censuses[t].not_used );
689 // should be true because: void_total is the count of
690 // live words that are void at this census, which *must*
691 // be less than the number of live words that have not
692 // been used yet.
693
694 ASSERT( censuses[t].drag_total <= censuses[t].used );
695 // similar reasoning as above.
696 }
697
698 return;
699 }
700
701 // otherwise... we're doing a heap profile that is restricted to
702 // some combination of lag, drag, void or use. We've kept all the
703 // census info for all censuses so far, but we still need to
704 // aggregate the counters forwards.
705
706 arena = newArena();
707 acc = allocHashTable();
708 ctrs = NULL;
709
710 for (t = 1; t < era; t++) {
711
712 // first look through all the counters we're aggregating
713 for (c = ctrs; c != NULL; c = c->next) {
714 // if one of the totals is non-zero, then this closure
715 // type must be present in the heap at this census time...
716 d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);
717
718 if (d == NULL) {
719 // if this closure identity isn't present in the
720 // census for this time period, then our running
721 // totals *must* be zero.
722 ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);
723
724 // debugCCS(c->identity);
725 // debugBelch(" census=%d void_total=%d drag_total=%d\n",
726 // t, c->c.ldv.void_total, c->c.ldv.drag_total);
727 } else {
728 d->c.ldv.void_total += c->c.ldv.void_total;
729 d->c.ldv.drag_total += c->c.ldv.drag_total;
730 c->c.ldv.void_total = d->c.ldv.void_total;
731 c->c.ldv.drag_total = d->c.ldv.drag_total;
732
733 ASSERT( c->c.ldv.void_total >= 0 );
734 ASSERT( c->c.ldv.drag_total >= 0 );
735 }
736 }
737
738 // now look through the counters in this census to find new ones
739 for (c = censuses[t].ctrs; c != NULL; c = c->next) {
740 d = lookupHashTable(acc, (StgWord)c->identity);
741 if (d == NULL) {
742 d = arenaAlloc( arena, sizeof(counter) );
743 initLDVCtr(d);
744 insertHashTable( acc, (StgWord)c->identity, d );
745 d->identity = c->identity;
746 d->next = ctrs;
747 ctrs = d;
748 d->c.ldv.void_total = c->c.ldv.void_total;
749 d->c.ldv.drag_total = c->c.ldv.drag_total;
750 }
751 ASSERT( c->c.ldv.void_total >= 0 );
752 ASSERT( c->c.ldv.drag_total >= 0 );
753 }
754 }
755
756 freeHashTable(acc, NULL);
757 arenaFree(arena);
758 }
759 #endif
760
761 /* -----------------------------------------------------------------------------
762 * Print out the results of a heap census.
763 * -------------------------------------------------------------------------- */
764 static void
765 dumpCensus( Census *census )
766 {
767 counter *ctr;
768 ssize_t count;
769
770 printSample(rtsTrue, census->time);
771 traceHeapProfSampleBegin(era);
772
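// Descriptive note (not in the original source): for a biographical
// profile the bands written below mean, in terms of the counters kept
// above: VOID = words that are never used, LAG = not yet used but used
// later, USE = used and to be used again, INHERENT_USE = 'inherently
// used' closures such as TSOs, DRAG = already used for the last time
// and waiting to die.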
773 #ifdef PROFILING
774 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
775 fprintf(hp_file, "VOID\t%lu\n",
776 (unsigned long)(census->void_total) * sizeof(W_));
777 fprintf(hp_file, "LAG\t%lu\n",
778 (unsigned long)(census->not_used - census->void_total) * sizeof(W_));
779 fprintf(hp_file, "USE\t%lu\n",
780 (unsigned long)(census->used - census->drag_total) * sizeof(W_));
781 fprintf(hp_file, "INHERENT_USE\t%lu\n",
782 (unsigned long)(census->prim) * sizeof(W_));
783 fprintf(hp_file, "DRAG\t%lu\n",
784 (unsigned long)(census->drag_total) * sizeof(W_));
785 printSample(rtsFalse, census->time);
786 return;
787 }
788 #endif
789
790 for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {
791
792 #ifdef PROFILING
793 if (RtsFlags.ProfFlags.bioSelector != NULL) {
794 count = 0;
795 if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
796 count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
797 if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
798 count += ctr->c.ldv.drag_total;
799 if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
800 count += ctr->c.ldv.void_total;
801 if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
802 count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
803 } else
804 #endif
805 {
806 count = ctr->c.resid;
807 }
808
809 ASSERT( count >= 0 );
810
811 if (count == 0) continue;
812
813 #if !defined(PROFILING)
814 switch (RtsFlags.ProfFlags.doHeapProfile) {
815 case HEAP_BY_CLOSURE_TYPE:
816 fprintf(hp_file, "%s", (char *)ctr->identity);
817 traceHeapProfSampleString(0, (char *)ctr->identity,
818 count * sizeof(W_));
819 break;
820 }
821 #endif
822
823 #ifdef PROFILING
824 switch (RtsFlags.ProfFlags.doHeapProfile) {
825 case HEAP_BY_CCS:
826 fprint_ccs(hp_file, (CostCentreStack *)ctr->identity,
827 RtsFlags.ProfFlags.ccsLength);
828 traceHeapProfSampleCostCentre(0, (CostCentreStack *)ctr->identity,
829 count * sizeof(W_));
830 break;
831 case HEAP_BY_MOD:
832 case HEAP_BY_DESCR:
833 case HEAP_BY_TYPE:
834 fprintf(hp_file, "%s", (char *)ctr->identity);
835 traceHeapProfSampleString(0, (char *)ctr->identity,
836 count * sizeof(W_));
837 break;
838 case HEAP_BY_RETAINER:
839 {
840 RetainerSet *rs = (RetainerSet *)ctr->identity;
841
842 // it might be the distinguished retainer set rs_MANY:
843 if (rs == &rs_MANY) {
844 fprintf(hp_file, "MANY");
845 break;
846 }
847
848 // Mark this retainer set by negating its id, because it
849 // has appeared in at least one census. We print the
850 // values of all such retainer sets into the log file at
851 // the end. A retainer set may exist but not feature in
852 // any censuses if it arose as the intermediate retainer
853 // set for some closure during retainer set calculation.
854 if (rs->id > 0)
855 rs->id = -(rs->id);
856
857 // report in the unit of bytes: * sizeof(StgWord)
858 printRetainerSetShort(hp_file, rs, RtsFlags.ProfFlags.ccsLength);
859 break;
860 }
861 default:
862 barf("dumpCensus; doHeapProfile");
863 }
864 #endif
865
866 fprintf(hp_file, "\t%" FMT_Word "\n", (W_)count * sizeof(W_));
867 }
868
869 printSample(rtsFalse, census->time);
870 }
871
872
873 static void heapProfObject(Census *census, StgClosure *p, size_t size,
874 rtsBool prim
875 #ifndef PROFILING
876 STG_UNUSED
877 #endif
878 )
879 {
880 const void *identity;
881 size_t real_size;
882 counter *ctr;
883
884 identity = NULL;
885
886 #ifdef PROFILING
887 // subtract the profiling overhead
888 real_size = size - sizeofW(StgProfHeader);
889 #else
890 real_size = size;
891 #endif
892
893 if (closureSatisfiesConstraints((StgClosure*)p)) {
894 #ifdef PROFILING
895 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
896 if (prim)
897 census->prim += real_size;
898 else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
899 census->not_used += real_size;
900 else
901 census->used += real_size;
902 } else
903 #endif
904 {
905 identity = closureIdentity((StgClosure *)p);
906
907 if (identity != NULL) {
908 ctr = lookupHashTable(census->hash, (StgWord)identity);
909 if (ctr != NULL) {
910 #ifdef PROFILING
911 if (RtsFlags.ProfFlags.bioSelector != NULL) {
912 if (prim)
913 ctr->c.ldv.prim += real_size;
914 else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
915 ctr->c.ldv.not_used += real_size;
916 else
917 ctr->c.ldv.used += real_size;
918 } else
919 #endif
920 {
921 ctr->c.resid += real_size;
922 }
923 } else {
924 ctr = arenaAlloc( census->arena, sizeof(counter) );
925 initLDVCtr(ctr);
926 insertHashTable( census->hash, (StgWord)identity, ctr );
927 ctr->identity = identity;
928 ctr->next = census->ctrs;
929 census->ctrs = ctr;
930
931 #ifdef PROFILING
932 if (RtsFlags.ProfFlags.bioSelector != NULL) {
933 if (prim)
934 ctr->c.ldv.prim = real_size;
935 else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
936 ctr->c.ldv.not_used = real_size;
937 else
938 ctr->c.ldv.used = real_size;
939 } else
940 #endif
941 {
942 ctr->c.resid = real_size;
943 }
944 }
945 }
946 }
947 }
948 }
949
950 // Compact objects require special handling code because they
951 // are not stored consecutively in memory (rather, each object
952 // is a list of objects), and that would break the while loop
953 // below. But we know that each block holds at most one object
954 // so we don't need the loop.
955 //
956 // See Note [Compact Normal Forms] for details.
957 static void
958 heapCensusCompactList(Census *census, bdescr *bd)
959 {
960 for (; bd != NULL; bd = bd->link) {
961 StgCompactNFDataBlock *block = (StgCompactNFDataBlock*)bd->start;
962 StgCompactNFData *str = block->owner;
963 heapProfObject(census, (StgClosure*)str,
964 compact_nfdata_full_sizeW(str), rtsTrue);
965 }
966 }
967
968 /* -----------------------------------------------------------------------------
969 * Code to perform a heap census.
970 * -------------------------------------------------------------------------- */
971 static void
972 heapCensusChain( Census *census, bdescr *bd )
973 {
974 StgPtr p;
975 const StgInfoTable *info;
976 size_t size;
977 rtsBool prim;
978
979 for (; bd != NULL; bd = bd->link) {
980
981 // HACK: pretend a pinned block is just one big ARR_WORDS
982 // owned by CCS_PINNED. These blocks can be full of holes due
983 // to alignment constraints so we can't traverse the memory
984 // and do a proper census.
985 if (bd->flags & BF_PINNED) {
986 StgClosure arr;
987 SET_HDR(&arr, &stg_ARR_WORDS_info, CCS_PINNED);
988 heapProfObject(census, &arr, bd->blocks * BLOCK_SIZE_W, rtsTrue);
989 continue;
990 }
991
992 p = bd->start;
993
994 // When we shrink a large ARR_WORDS, we do not adjust the free pointer
995 // of the associated block descriptor, thus introducing slop at the end
996 // of the object. This slop remains after GC, violating the assumption
997 // of the loop below that all slop has been eliminated (#11627).
998 // Consequently, we handle large ARR_WORDS objects as a special case.
999 if (bd->flags & BF_LARGE
1000 && get_itbl((StgClosure *)p)->type == ARR_WORDS) {
1001 size = arr_words_sizeW((StgArrBytes *)p);
1002 prim = rtsTrue;
1003 heapProfObject(census, (StgClosure *)p, size, prim);
1004 continue;
1005 }
1006
1007 while (p < bd->free) {
1008 info = get_itbl((const StgClosure *)p);
1009 prim = rtsFalse;
1010
1011 switch (info->type) {
1012
1013 case THUNK:
1014 size = thunk_sizeW_fromITBL(info);
1015 break;
1016
1017 case THUNK_1_1:
1018 case THUNK_0_2:
1019 case THUNK_2_0:
1020 size = sizeofW(StgThunkHeader) + 2;
1021 break;
1022
1023 case THUNK_1_0:
1024 case THUNK_0_1:
1025 case THUNK_SELECTOR:
1026 size = sizeofW(StgThunkHeader) + 1;
1027 break;
1028
1029 case CONSTR:
1030 case FUN:
1031 case BLACKHOLE:
1032 case BLOCKING_QUEUE:
1033 case FUN_1_0:
1034 case FUN_0_1:
1035 case FUN_1_1:
1036 case FUN_0_2:
1037 case FUN_2_0:
1038 case CONSTR_1_0:
1039 case CONSTR_0_1:
1040 case CONSTR_1_1:
1041 case CONSTR_0_2:
1042 case CONSTR_2_0:
1043 size = sizeW_fromITBL(info);
1044 break;
1045
1046 case IND:
1047 // Special case/Delicate Hack: INDs don't normally
1048 // appear, since we're doing this heap census right
1049 // after GC. However, GarbageCollect() also does
1050 // resurrectThreads(), which can update some
1051 // blackholes when it calls raiseAsync() on the
1052 // resurrected threads. So we know that any IND will
1053 // be the size of a BLACKHOLE.
1054 size = BLACKHOLE_sizeW();
1055 break;
1056
1057 case BCO:
1058 prim = rtsTrue;
1059 size = bco_sizeW((StgBCO *)p);
1060 break;
1061
1062 case MVAR_CLEAN:
1063 case MVAR_DIRTY:
1064 case TVAR:
1065 case WEAK:
1066 case PRIM:
1067 case MUT_PRIM:
1068 case MUT_VAR_CLEAN:
1069 case MUT_VAR_DIRTY:
1070 prim = rtsTrue;
1071 size = sizeW_fromITBL(info);
1072 break;
1073
1074 case AP:
1075 size = ap_sizeW((StgAP *)p);
1076 break;
1077
1078 case PAP:
1079 size = pap_sizeW((StgPAP *)p);
1080 break;
1081
1082 case AP_STACK:
1083 size = ap_stack_sizeW((StgAP_STACK *)p);
1084 break;
1085
1086 case ARR_WORDS:
1087 prim = rtsTrue;
1088 size = arr_words_sizeW((StgArrBytes*)p);
1089 break;
1090
1091 case MUT_ARR_PTRS_CLEAN:
1092 case MUT_ARR_PTRS_DIRTY:
1093 case MUT_ARR_PTRS_FROZEN:
1094 case MUT_ARR_PTRS_FROZEN0:
1095 prim = rtsTrue;
1096 size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
1097 break;
1098
1099 case SMALL_MUT_ARR_PTRS_CLEAN:
1100 case SMALL_MUT_ARR_PTRS_DIRTY:
1101 case SMALL_MUT_ARR_PTRS_FROZEN:
1102 case SMALL_MUT_ARR_PTRS_FROZEN0:
1103 prim = rtsTrue;
1104 size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
1105 break;
1106
1107 case TSO:
1108 prim = rtsTrue;
1109 #ifdef PROFILING
1110 if (RtsFlags.ProfFlags.includeTSOs) {
1111 size = sizeofW(StgTSO);
1112 break;
1113 } else {
1114 // Skip this TSO and move on to the next object
1115 p += sizeofW(StgTSO);
1116 continue;
1117 }
1118 #else
1119 size = sizeofW(StgTSO);
1120 break;
1121 #endif
1122
1123 case STACK:
1124 prim = rtsTrue;
1125 #ifdef PROFILING
1126 if (RtsFlags.ProfFlags.includeTSOs) {
1127 size = stack_sizeW((StgStack*)p);
1128 break;
1129 } else {
1130                 // Skip this stack and move on to the next object
1131 p += stack_sizeW((StgStack*)p);
1132 continue;
1133 }
1134 #else
1135 size = stack_sizeW((StgStack*)p);
1136 break;
1137 #endif
1138
1139 case TREC_CHUNK:
1140 prim = rtsTrue;
1141 size = sizeofW(StgTRecChunk);
1142 break;
1143
1144 case COMPACT_NFDATA:
1145 barf("heapCensus, found compact object in the wrong list");
1146 break;
1147
1148 default:
1149 barf("heapCensus, unknown object: %d", info->type);
1150 }
1151
1152 heapProfObject(census,(StgClosure*)p,size,prim);
1153
1154 p += size;
1155 }
1156 }
1157 }
1158
1159 void heapCensus (Time t)
1160 {
1161 uint32_t g, n;
1162 Census *census;
1163 gen_workspace *ws;
1164
1165 census = &censuses[era];
1166 census->time = mut_user_time_until(t);
1167
1168 // calculate retainer sets if necessary
1169 #ifdef PROFILING
1170 if (doingRetainerProfiling()) {
1171 retainerProfile();
1172 }
1173 #endif
1174
1175 #ifdef PROFILING
1176 stat_startHeapCensus();
1177 #endif
1178
1179 // Traverse the heap, collecting the census info
1180 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
1181 heapCensusChain( census, generations[g].blocks );
1182         // Are we interested in large objects? It might be
1183 // confusing to include the stack in a heap profile.
1184 heapCensusChain( census, generations[g].large_objects );
1185 heapCensusCompactList ( census, generations[g].compact_objects );
1186
1187 for (n = 0; n < n_capabilities; n++) {
1188 ws = &gc_threads[n]->gens[g];
1189 heapCensusChain(census, ws->todo_bd);
1190 heapCensusChain(census, ws->part_list);
1191 heapCensusChain(census, ws->scavd_list);
1192 }
1193 }
1194
1195 // dump out the census info
1196 #ifdef PROFILING
1197 // We can't generate any info for LDV profiling until
1198 // the end of the run...
1199 if (!doingLDVProfiling())
1200 dumpCensus( census );
1201 #else
1202 dumpCensus( census );
1203 #endif
1204
1205
1206 // free our storage, unless we're keeping all the census info for
1207 // future restriction by biography.
1208 #ifdef PROFILING
1209 if (RtsFlags.ProfFlags.bioSelector == NULL)
1210 {
1211 freeEra(census);
1212 census->hash = NULL;
1213 census->arena = NULL;
1214 }
1215 #endif
1216
1217 // we're into the next time period now
1218 nextEra();
1219
1220 #ifdef PROFILING
1221 stat_endHeapCensus();
1222 #endif
1223 }