/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2003
 *
 * Support for heap profiling
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Capability.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Profiling.h"
#include "ProfHeap.h"
#include "Stats.h"
#include "Hash.h"
#include "RetainerProfile.h"
#include "LdvProfile.h"
#include "Arena.h"
#include "Printer.h"
#include "Trace.h"
#include "sm/GCThread.h"

#include <fs_rts.h>
#include <string.h>

/* -----------------------------------------------------------------------------
 * era stores the current time period.  It is the same as the
 * number of censuses that have been performed.
 *
 * RESTRICTION:
 *   era must be no longer than LDV_SHIFT (15 or 30) bits.
 * Invariants:
 *   era is initialized to 1 in initHeapProfiling().
 *
 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
 * When era reaches max_era, the profiling stops because a closure can
 * store only up to (max_era - 1) as its creation or last use time.
 * -------------------------------------------------------------------------- */
unsigned int era;
static uint32_t max_era;
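
// Illustrative sketch of the era restriction above (not part of the RTS
// API): with LDV_SHIFT == 15, max_era == 2^15 == 32768, so a creation or
// last-use time must fit in 15 bits of a closure's LDV word, e.g.
//
//   ASSERT(era < max_era);                       // enforced by nextEra()
//   StgWord packed = (StgWord)era << LDV_SHIFT;  // hypothetical packing
//
// See the LDV profiling headers for the authoritative bit layout.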

/* -----------------------------------------------------------------------------
 * Counters
 *
 * For most heap profiles each closure identity gets a simple count
 * of live words in the heap at each census.  However, if we're
 * selecting by biography, then we have to keep the various
 * lag/drag/void counters for each identity.
 * -------------------------------------------------------------------------- */
typedef struct _counter {
    const void *identity;
    union {
        ssize_t resid;
        struct {
            // Total sizes of:
            ssize_t prim;       // 'inherently used' closures
            ssize_t not_used;   // 'never used' closures
            ssize_t used;       // 'used at least once' closures
            ssize_t void_total; // 'destroyed without being used' closures
            ssize_t drag_total; // 'used at least once and waiting to die'
        } ldv;
    } c;
    struct _counter *next;
} counter;
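
// Only one arm of the union is in use for a given run: c.resid for
// ordinary profiles, c.ldv when selecting by biography.  A sketch of the
// discipline (illustrative, mirroring heapProfObject() below):
//
//   if (RtsFlags.ProfFlags.bioSelector != NULL)
//       ctr->c.ldv.used += real_size;   // one of the five ldv fields
//   else
//       ctr->c.resid += real_size;      // plain residency count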

STATIC_INLINE void
initLDVCtr( counter *ctr )
{
    ctr->c.ldv.prim = 0;
    ctr->c.ldv.not_used = 0;
    ctr->c.ldv.used = 0;
    ctr->c.ldv.void_total = 0;
    ctr->c.ldv.drag_total = 0;
}

typedef struct {
    double      time;   // the time in MUT time when the census is made
    HashTable * hash;
    counter   * ctrs;
    Arena     * arena;

    // for LDV profiling, when just displaying by LDV
    ssize_t    prim;
    ssize_t    not_used;
    ssize_t    used;
    ssize_t    void_total;
    ssize_t    drag_total;
} Census;

static Census *censuses = NULL;
static uint32_t n_censuses = 0;

#if defined(PROFILING)
static void aggregateCensusInfo( void );
#endif

static void dumpCensus( Census *census );

static bool closureSatisfiesConstraints( const StgClosure* p );

/* ----------------------------------------------------------------------------
 * Find the "closure identity", which is a unique pointer representing
 * the band to which this closure's heap space is attributed in the
 * heap profile.
 * ------------------------------------------------------------------------- */
static const void *
closureIdentity( const StgClosure *p )
{
    switch (RtsFlags.ProfFlags.doHeapProfile) {

#if defined(PROFILING)
    case HEAP_BY_CCS:
        return p->header.prof.ccs;
    case HEAP_BY_MOD:
        return p->header.prof.ccs->cc->module;
    case HEAP_BY_DESCR:
        return GET_PROF_DESC(get_itbl(p));
    case HEAP_BY_TYPE:
        return GET_PROF_TYPE(get_itbl(p));
    case HEAP_BY_RETAINER:
        // AFAIK, the only closures in the heap which might not have a
        // valid retainer set are DEAD_WEAK closures.
        if (isRetainerSetFieldValid(p))
            return retainerSetOf(p);
        else
            return NULL;
#endif

    case HEAP_BY_CLOSURE_TYPE:
    {
        const StgInfoTable *info;
        info = get_itbl(p);
        switch (info->type) {
        case CONSTR:
        case CONSTR_1_0:
        case CONSTR_0_1:
        case CONSTR_2_0:
        case CONSTR_1_1:
        case CONSTR_0_2:
        case CONSTR_NOCAF:
            return GET_CON_DESC(itbl_to_con_itbl(info));
        default:
            return closure_type_names[info->type];
        }
    }

    default:
        barf("closureIdentity");
    }
}

/* --------------------------------------------------------------------------
 * Profiling type predicates
 * ----------------------------------------------------------------------- */
#if defined(PROFILING)
STATIC_INLINE bool
doingLDVProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
            || RtsFlags.ProfFlags.bioSelector != NULL);
}

bool
doingRetainerProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
            || RtsFlags.ProfFlags.retainerSelector != NULL);
}
#endif /* PROFILING */

// Processes a closure 'c' being destroyed whose size is 'size'.
// Make sure that LDV_recordDead() is not invoked on 'inherently used' closures
// such as TSO; they should not be involved in computing dragNew or voidNew.
//
// Even though era is checked in both LdvCensusForDead() and
// LdvCensusKillAll(), we still need to make sure that era is > 0 because
// LDV_recordDead() may be called from elsewhere in the runtime system. E.g.,
// when a thunk is replaced by an indirection object.

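// Illustrative sketch (the real bit layout lives in the LDV profiling
// headers): the LDV word of a closure packs a state flag and an era, and
// the decoding below is exactly
//
//   if ((LDVW(c) & LDV_STATE_MASK) == LDV_STATE_CREATE)
//       t = (LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT;  // era of creation
//   else
//       t = LDVW(c) & LDV_LAST_MASK;                   // era of last use
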
#if defined(PROFILING)
void
LDV_recordDead( const StgClosure *c, uint32_t size )
{
    const void *id;
    uint32_t t;
    counter *ctr;

    if (era > 0 && closureSatisfiesConstraints(c)) {
        size -= sizeofW(StgProfHeader);
        ASSERT(LDVW(c) != 0);
        if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
            t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
            if (t < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t].void_total   += size;
                    censuses[era].void_total -= size;
                    ASSERT(censuses[t].void_total < censuses[t].not_used);
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.void_total += size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.void_total -= size;
                }
            }
        } else {
            t = LDVW((c)) & LDV_LAST_MASK;
            if (t + 1 < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t+1].drag_total += size;
                    censuses[era].drag_total -= size;
                } else {
                    const void *id;
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.drag_total += size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.drag_total -= size;
                }
            }
        }
    }
}
#endif

/* --------------------------------------------------------------------------
 * Initialize censuses[era].
 * ----------------------------------------------------------------------- */

STATIC_INLINE void
initEra(Census *census)
{
    census->hash  = allocHashTable();
    census->ctrs  = NULL;
    census->arena = newArena();

    census->not_used   = 0;
    census->used       = 0;
    census->prim       = 0;
    census->void_total = 0;
    census->drag_total = 0;
}

STATIC_INLINE void
freeEra(Census *census)
{
    arenaFree(census->arena);
    freeHashTable(census->hash, NULL);
}

/* --------------------------------------------------------------------------
 * Increase era by 1 and initialize censuses[era].
 * Reallocate censuses[] and increase its size if needed.
 * ----------------------------------------------------------------------- */

static void
nextEra( void )
{
#if defined(PROFILING)
    if (doingLDVProfiling()) {
        era++;

        if (era == max_era) {
            errorBelch("Maximum number of censuses reached.");
            if (rtsConfig.rts_opts_suggestions == true) {
                if (rtsConfig.rts_opts_enabled == RtsOptsAll) {
                    errorBelch("Use `+RTS -i' to reduce censuses.");
                } else {
                    errorBelch("Relink with -rtsopts and "
                               "use `+RTS -i' to reduce censuses.");
                }
            }
            stg_exit(EXIT_FAILURE);
        }

        if (era == n_censuses) {
            n_censuses *= 2;
            censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
                                       "nextEra");
        }
    }
#endif /* PROFILING */

    initEra( &censuses[era] );
}
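
// For example (illustrative; `-i' sets the heap-census interval, see the
// users guide): running a -prof build as
//
//   ./prog +RTS -hb -i1
//
// spaces censuses roughly one second apart, keeping era small and the
// doubling reallocation of censuses[] above infrequent.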

/* ----------------------------------------------------------------------------
 * Heap profiling by info table
 * ------------------------------------------------------------------------- */

#if !defined(PROFILING)
FILE *hp_file;
static char *hp_filename;

void freeProfiling (void)
{
}

void initProfiling (void)
{
    char *prog;

    prog = stgMallocBytes(strlen(prog_name) + 1, "initProfiling");
    strcpy(prog, prog_name);
#if defined(mingw32_HOST_OS)
    // on Windows, drop the .exe suffix if there is one
    {
        char *suff;
        suff = strrchr(prog,'.');
        if (suff != NULL && !strcmp(suff,".exe")) {
            *suff = '\0';
        }
    }
#endif

    if (RtsFlags.ProfFlags.doHeapProfile) {
        /* Initialise the log file name */
        hp_filename = stgMallocBytes(strlen(prog) + 6, "hpFileName");
        sprintf(hp_filename, "%s.hp", prog);

        /* open the log file */
        if ((hp_file = __rts_fopen(hp_filename, "w")) == NULL) {
            debugBelch("Can't open profiling report file %s\n",
                       hp_filename);
            RtsFlags.ProfFlags.doHeapProfile = 0;
            stgFree(prog);
            return;
        }
    }

    stgFree(prog);

    initHeapProfiling();
}

void endProfiling( void )
{
    endHeapProfiling();
}
#endif /* !PROFILING */

static void
printEscapedString(const char* string)
{
    for (const char* p = string; *p != '\0'; ++p) {
        if (*p == '\"') {
            // Escape every " as ""
            fputc('"', hp_file);
        }
        fputc(*p, hp_file);
    }
}
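
// For example, printEscapedString("say \"hi\"") writes
//
//   say ""hi""
//
// to hp_file, matching the quoting used inside the JOB "..." string
// written by initHeapProfiling() below.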

static void
printSample(bool beginSample, StgDouble sampleValue)
{
    fprintf(hp_file, "%s %f\n",
            (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
            sampleValue);
    if (!beginSample) {
        fflush(hp_file);
    }
}
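
// Emits lines such as
//
//   BEGIN_SAMPLE 0.000000
//   END_SAMPLE 0.000000
//
// ("%f" prints six decimal places by default); the file is flushed after
// each END_SAMPLE, so a complete sample is on disk before the next begins.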

static void
dumpCostCentresToEventLog(void)
{
#if defined(PROFILING)
    CostCentre *cc, *next;
    for (cc = CC_LIST; cc != NULL; cc = next) {
        next = cc->link;
        traceHeapProfCostCentre(cc->ccID, cc->label, cc->module,
                                cc->srcloc, cc->is_caf);
    }
#endif
}

/* --------------------------------------------------------------------------
 * Initialize the heap profiler
 * ----------------------------------------------------------------------- */
uint32_t
initHeapProfiling(void)
{
    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return 0;
    }

#if defined(PROFILING)
    if (doingLDVProfiling() && doingRetainerProfiling()) {
        errorBelch("cannot mix -hb and -hr");
        stg_exit(EXIT_FAILURE);
    }
#if defined(THREADED_RTS)
    // See Trac #12019.
    if (doingLDVProfiling() && RtsFlags.ParFlags.nCapabilities > 1) {
        errorBelch("-hb cannot be used with multiple capabilities");
        stg_exit(EXIT_FAILURE);
    }
#endif
#endif

    // we only count eras if we're doing LDV profiling.  Otherwise era
    // is fixed at zero.
#if defined(PROFILING)
    if (doingLDVProfiling()) {
        era = 1;
    } else
#endif
    {
        era = 0;
    }

    // max_era = 2^LDV_SHIFT
    max_era = 1 << LDV_SHIFT;

    n_censuses = 32;
    censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");

    initEra( &censuses[era] );

    /* initProfilingLogFile(); */
    fprintf(hp_file, "JOB \"");
    printEscapedString(prog_name);

#if defined(PROFILING)
    for (int i = 1; i < prog_argc; ++i) {
        fputc(' ', hp_file);
        printEscapedString(prog_argv[i]);
    }
    fprintf(hp_file, " +RTS");
    for (int i = 0; i < rts_argc; ++i) {
        fputc(' ', hp_file);
        printEscapedString(rts_argv[i]);
    }
#endif /* PROFILING */

    fprintf(hp_file, "\"\n" );

    fprintf(hp_file, "DATE \"%s\"\n", time_str());

    fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
    fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");

    printSample(true, 0);
    printSample(false, 0);

#if defined(PROFILING)
    if (doingRetainerProfiling()) {
        initRetainerProfiling();
    }
#endif

    traceHeapProfBegin(0);
    dumpCostCentresToEventLog();

    return 0;
}
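
// The .hp header written above therefore looks like this (illustrative
// JOB and DATE values; the real ones depend on the program and the run):
//
//   JOB "prog args +RTS -hc"
//   DATE "Fri Jan 01 12:00 2021"
//   SAMPLE_UNIT "seconds"
//   VALUE_UNIT "bytes"
//   BEGIN_SAMPLE 0.000000
//   END_SAMPLE 0.000000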

void
endHeapProfiling(void)
{
    StgDouble seconds;

    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return;
    }

#if defined(PROFILING)
    if (doingRetainerProfiling()) {
        endRetainerProfiling();
    }
#endif

#if defined(PROFILING)
    if (doingLDVProfiling()) {
        uint32_t t;
        LdvCensusKillAll();
        aggregateCensusInfo();
        for (t = 1; t < era; t++) {
            dumpCensus( &censuses[t] );
        }
    }
#endif

#if defined(PROFILING)
    if (doingLDVProfiling()) {
        uint32_t t;
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            for (t = 1; t <= era; t++) {
                freeEra( &censuses[t] );
            }
        } else {
            freeEra( &censuses[era] );
        }
    } else {
        freeEra( &censuses[0] );
    }
#else
    freeEra( &censuses[0] );
#endif

    stgFree(censuses);

    seconds = mut_user_time();
    printSample(true, seconds);
    printSample(false, seconds);
    fclose(hp_file);
}


#if defined(PROFILING)
static size_t
buf_append(char *p, const char *q, char *end)
{
    size_t m;

    for (m = 0; p < end; p++, q++, m++) {
        *p = *q;
        if (*q == '\0') { break; }
    }
    return m;
}

static void
fprint_ccs(FILE *fp, CostCentreStack *ccs, uint32_t max_length)
{
    char buf[max_length+1], *p, *buf_end;

    // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
    if (ccs == CCS_MAIN) {
        fprintf(fp, "MAIN");
        return;
    }

    fprintf(fp, "(%" FMT_Int ")", ccs->ccsID);

    p = buf;
    buf_end = buf + max_length + 1;

    // keep printing components of the stack until we run out of space
    // in the buffer.  If we run out of space, end with "...".
    for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {

        // CAF cost centres print as M.CAF, but we leave the module
        // name out of all the others to save space.
        if (!strcmp(ccs->cc->label,"CAF")) {
            p += buf_append(p, ccs->cc->module, buf_end);
            p += buf_append(p, ".CAF", buf_end);
        } else {
            p += buf_append(p, ccs->cc->label, buf_end);
            if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
                p += buf_append(p, "/", buf_end);
            }
        }

        if (p >= buf_end) {
            sprintf(buf+max_length-4, "...");
            break;
        }
    }
    fprintf(fp, "%s", buf);
}
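
// For example (illustrative ccsID and labels): the stack
// MAIN -> foo -> bar prints innermost-first as
//
//   (42)bar/foo
//
// and a CAF in module M prints as "(n)M.CAF".  If the rendering exceeds
// max_length it is truncated and ends with "...".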

bool
strMatchesSelector( const char* str, const char* sel )
{
    const char* p;
    // debugBelch("str_matches_selector %s %s\n", str, sel);
    while (1) {
        // Compare str against wherever we've got to in sel.
        p = str;
        while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
            p++; sel++;
        }
        // Match if all of str used and have reached the end of a sel fragment.
        if (*p == '\0' && (*sel == ',' || *sel == '\0'))
            return true;

        // No match.  Advance sel to the start of the next elem.
        while (*sel != ',' && *sel != '\0') sel++;
        if (*sel == ',') sel++;

        /* Run out of sel ?? */
        if (*sel == '\0') return false;
    }
}
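
// For example:
//
//   strMatchesSelector("drag", "lag,drag,void") == true
//   strMatchesSelector("use",  "lag,drag")      == false
//
// This is how the comma-separated selector lists passed via the RTS
// heap-profiling flags are interpreted.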

#endif /* PROFILING */

/* -----------------------------------------------------------------------------
 * Figure out whether a closure should be counted in this census, by
 * testing against all the specified constraints.
 * -------------------------------------------------------------------------- */
static bool
closureSatisfiesConstraints( const StgClosure* p )
{
#if !defined(PROFILING)
    (void)p; /* keep gcc -Wall happy */
    return true;
#else
    bool b;

    // The CCS has a selected field to indicate whether this closure is
    // deselected by not being mentioned in the module, CC, or CCS
    // selectors.
    if (!p->header.prof.ccs->selected) {
        return false;
    }

    if (RtsFlags.ProfFlags.descrSelector) {
        b = strMatchesSelector( (GET_PROF_DESC(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.descrSelector );
        if (!b) return false;
    }
    if (RtsFlags.ProfFlags.typeSelector) {
        b = strMatchesSelector( (GET_PROF_TYPE(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.typeSelector );
        if (!b) return false;
    }
    if (RtsFlags.ProfFlags.retainerSelector) {
        RetainerSet *rs;
        uint32_t i;
        // We must check that the retainer set is valid here.  One
        // reason it might not be valid is if this closure is a
        // newly deceased weak pointer (i.e. a DEAD_WEAK), since
        // these aren't reached by the retainer profiler's traversal.
        if (isRetainerSetFieldValid((StgClosure *)p)) {
            rs = retainerSetOf((StgClosure *)p);
            if (rs != NULL) {
                for (i = 0; i < rs->num; i++) {
                    b = strMatchesSelector( rs->element[i]->cc->label,
                                            RtsFlags.ProfFlags.retainerSelector );
                    if (b) return true;
                }
            }
        }
        return false;
    }
    return true;
#endif /* PROFILING */
}

/* -----------------------------------------------------------------------------
 * Aggregate the heap census info for biographical profiling
 * -------------------------------------------------------------------------- */
#if defined(PROFILING)
static void
aggregateCensusInfo( void )
{
    HashTable *acc;
    uint32_t t;
    counter *c, *d, *ctrs;
    Arena *arena;

    if (!doingLDVProfiling()) return;

    // Aggregate the LDV counters when displaying by biography.
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        long void_total, drag_total;

        // Now we compute void_total and drag_total for each census.
        // After the program has finished, the void_total field of
        // each census contains the count of words that were *created*
        // in this era and were eventually void.  Conversely, if a
        // void closure was destroyed in this era, it will be
        // represented by a negative count of words in void_total.
        //
        // To get the count of live words that are void at each
        // census, just propagate the void_total count forwards:
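        //
        // Worked example (illustrative numbers): if 100 words created
        // in era 1 turn out to be void and die in era 3, then before
        // this loop censuses[1].void_total == +100 and
        // censuses[3].void_total == -100.  The running sum below makes
        // void_total 100 at censuses 1 and 2 and 0 from census 3 on,
        // i.e. the words count as void exactly while they are live.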

        void_total = 0;
        drag_total = 0;
        for (t = 1; t < era; t++) { // note: start at 1, not 0
            void_total += censuses[t].void_total;
            drag_total += censuses[t].drag_total;
            censuses[t].void_total = void_total;
            censuses[t].drag_total = drag_total;

            ASSERT( censuses[t].void_total <= censuses[t].not_used );
            // should be true because: void_total is the count of
            // live words that are void at this census, which *must*
            // be less than the number of live words that have not
            // been used yet.

            ASSERT( censuses[t].drag_total <= censuses[t].used );
            // similar reasoning as above.
        }

        return;
    }

    // otherwise... we're doing a heap profile that is restricted to
    // some combination of lag, drag, void or use.  We've kept all the
    // census info for all censuses so far, but we still need to
    // aggregate the counters forwards.

    arena = newArena();
    acc = allocHashTable();
    ctrs = NULL;

    for (t = 1; t < era; t++) {

        // first look through all the counters we're aggregating
        for (c = ctrs; c != NULL; c = c->next) {
            // if one of the totals is non-zero, then this closure
            // type must be present in the heap at this census time...
            d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);

            if (d == NULL) {
                // if this closure identity isn't present in the
                // census for this time period, then our running
                // totals *must* be zero.
                ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);

                // debugCCS(c->identity);
                // debugBelch(" census=%d void_total=%d drag_total=%d\n",
                //            t, c->c.ldv.void_total, c->c.ldv.drag_total);
            } else {
                d->c.ldv.void_total += c->c.ldv.void_total;
                d->c.ldv.drag_total += c->c.ldv.drag_total;
                c->c.ldv.void_total =  d->c.ldv.void_total;
                c->c.ldv.drag_total =  d->c.ldv.drag_total;

                ASSERT( c->c.ldv.void_total >= 0 );
                ASSERT( c->c.ldv.drag_total >= 0 );
            }
        }

        // now look through the counters in this census to find new ones
        for (c = censuses[t].ctrs; c != NULL; c = c->next) {
            d = lookupHashTable(acc, (StgWord)c->identity);
            if (d == NULL) {
                d = arenaAlloc( arena, sizeof(counter) );
                initLDVCtr(d);
                insertHashTable( acc, (StgWord)c->identity, d );
                d->identity = c->identity;
                d->next = ctrs;
                ctrs = d;
                d->c.ldv.void_total = c->c.ldv.void_total;
                d->c.ldv.drag_total = c->c.ldv.drag_total;
            }
            ASSERT( c->c.ldv.void_total >= 0 );
            ASSERT( c->c.ldv.drag_total >= 0 );
        }
    }

    freeHashTable(acc, NULL);
    arenaFree(arena);
}
#endif

/* -----------------------------------------------------------------------------
 * Print out the results of a heap census.
 * -------------------------------------------------------------------------- */
static void
dumpCensus( Census *census )
{
    counter *ctr;
    ssize_t count;

    printSample(true, census->time);
    traceHeapProfSampleBegin(era);

#if defined(PROFILING)
    /* change typecast to uint64_t to remove
     * print formatting warning.  See #12636 */
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        fprintf(hp_file, "VOID\t%" FMT_Word64 "\n",
                (uint64_t)(census->void_total *
                           sizeof(W_)));
        fprintf(hp_file, "LAG\t%" FMT_Word64 "\n",
                (uint64_t)((census->not_used - census->void_total) *
                           sizeof(W_)));
        fprintf(hp_file, "USE\t%" FMT_Word64 "\n",
                (uint64_t)((census->used - census->drag_total) *
                           sizeof(W_)));
        fprintf(hp_file, "INHERENT_USE\t%" FMT_Word64 "\n",
                (uint64_t)(census->prim * sizeof(W_)));
        fprintf(hp_file, "DRAG\t%" FMT_Word64 "\n",
                (uint64_t)(census->drag_total * sizeof(W_)));
        printSample(false, census->time);
        return;
    }
#endif

    for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {

#if defined(PROFILING)
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            count = 0;
            if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
            if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.drag_total;
            if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.void_total;
            if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
        } else
#endif
        {
            count = ctr->c.resid;
        }

        ASSERT( count >= 0 );

        if (count == 0) continue;

        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CLOSURE_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            traceHeapProfSampleString(0, (char *)ctr->identity,
                                      count * sizeof(W_));
            break;
        }

#if defined(PROFILING)
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CCS:
            fprint_ccs(hp_file, (CostCentreStack *)ctr->identity,
                       RtsFlags.ProfFlags.ccsLength);
            traceHeapProfSampleCostCentre(0, (CostCentreStack *)ctr->identity,
                                          count * sizeof(W_));
            break;
        case HEAP_BY_MOD:
        case HEAP_BY_DESCR:
        case HEAP_BY_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            traceHeapProfSampleString(0, (char *)ctr->identity,
                                      count * sizeof(W_));
            break;
        case HEAP_BY_RETAINER:
        {
            RetainerSet *rs = (RetainerSet *)ctr->identity;

            // it might be the distinguished retainer set rs_MANY:
            if (rs == &rs_MANY) {
                fprintf(hp_file, "MANY");
                break;
            }

            // Mark this retainer set by negating its id, because it
            // has appeared in at least one census.  We print the
            // values of all such retainer sets into the log file at
            // the end.  A retainer set may exist but not feature in
            // any censuses if it arose as the intermediate retainer
            // set for some closure during retainer set calculation.
            if (rs->id > 0)
                rs->id = -(rs->id);

            // report in the unit of bytes: * sizeof(StgWord)
            printRetainerSetShort(hp_file, rs, RtsFlags.ProfFlags.ccsLength);
            break;
        }
        default:
            barf("dumpCensus; doHeapProfile");
        }
#endif

        fprintf(hp_file, "\t%" FMT_Word "\n", (W_)count * sizeof(W_));
    }

    printSample(false, census->time);
}
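
// Under plain -hb the whole census reduces to the five biography bands
// printed above, so a sample looks like (illustrative numbers, tab
// separated):
//
//   BEGIN_SAMPLE 1.000000
//   VOID	4096
//   LAG	8192
//   USE	163840
//   INHERENT_USE	2048
//   DRAG	1024
//   END_SAMPLE 1.000000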

static void heapProfObject(Census *census, StgClosure *p, size_t size,
                           bool prim
#if !defined(PROFILING)
                           STG_UNUSED
#endif
                           )
{
    const void *identity;
    size_t real_size;
    counter *ctr;

    identity = NULL;

#if defined(PROFILING)
    // subtract the profiling overhead
    real_size = size - sizeofW(StgProfHeader);
#else
    real_size = size;
#endif

    if (closureSatisfiesConstraints((StgClosure*)p)) {
#if defined(PROFILING)
        if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
            if (prim)
                census->prim += real_size;
            else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                census->not_used += real_size;
            else
                census->used += real_size;
        } else
#endif
        {
            identity = closureIdentity((StgClosure *)p);

            if (identity != NULL) {
                ctr = lookupHashTable(census->hash, (StgWord)identity);
                if (ctr != NULL) {
#if defined(PROFILING)
                    if (RtsFlags.ProfFlags.bioSelector != NULL) {
                        if (prim)
                            ctr->c.ldv.prim += real_size;
                        else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                            ctr->c.ldv.not_used += real_size;
                        else
                            ctr->c.ldv.used += real_size;
                    } else
#endif
                    {
                        ctr->c.resid += real_size;
                    }
                } else {
                    ctr = arenaAlloc( census->arena, sizeof(counter) );
                    initLDVCtr(ctr);
                    insertHashTable( census->hash, (StgWord)identity, ctr );
                    ctr->identity = identity;
                    ctr->next = census->ctrs;
                    census->ctrs = ctr;

#if defined(PROFILING)
                    if (RtsFlags.ProfFlags.bioSelector != NULL) {
                        if (prim)
                            ctr->c.ldv.prim = real_size;
                        else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                            ctr->c.ldv.not_used = real_size;
                        else
                            ctr->c.ldv.used = real_size;
                    } else
#endif
                    {
                        ctr->c.resid = real_size;
                    }
                }
            }
        }
    }
}

// Compact objects require special handling code because they
// are not stored consecutively in memory (rather, each object
// is a list of objects), and that would break the while loop
// below.  But we know that each block holds at most one object
// so we don't need the loop.
//
// See Note [Compact Normal Forms] for details.
static void
heapCensusCompactList(Census *census, bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        StgCompactNFDataBlock *block = (StgCompactNFDataBlock*)bd->start;
        StgCompactNFData *str = block->owner;
        heapProfObject(census, (StgClosure*)str,
                       compact_nfdata_full_sizeW(str), true);
    }
}

/* -----------------------------------------------------------------------------
 * Code to perform a heap census.
 * -------------------------------------------------------------------------- */
static void
heapCensusChain( Census *census, bdescr *bd )
{
    StgPtr p;
    const StgInfoTable *info;
    size_t size;
    bool prim;

    for (; bd != NULL; bd = bd->link) {

        // HACK: pretend a pinned block is just one big ARR_WORDS
        // owned by CCS_PINNED.  These blocks can be full of holes due
        // to alignment constraints so we can't traverse the memory
        // and do a proper census.
        if (bd->flags & BF_PINNED) {
            StgClosure arr;
            SET_HDR(&arr, &stg_ARR_WORDS_info, CCS_PINNED);
            heapProfObject(census, &arr, bd->blocks * BLOCK_SIZE_W, true);
            continue;
        }

        p = bd->start;

        // When we shrink a large ARR_WORDS, we do not adjust the free pointer
        // of the associated block descriptor, thus introducing slop at the end
        // of the object.  This slop remains after GC, violating the assumption
        // of the loop below that all slop has been eliminated (#11627).
        // Consequently, we handle large ARR_WORDS objects as a special case.
        if (bd->flags & BF_LARGE
            && get_itbl((StgClosure *)p)->type == ARR_WORDS) {
            size = arr_words_sizeW((StgArrBytes *)p);
            prim = true;
            heapProfObject(census, (StgClosure *)p, size, prim);
            continue;
        }

        while (p < bd->free) {
            info = get_itbl((const StgClosure *)p);
            prim = false;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case FUN:
            case BLACKHOLE:
            case BLOCKING_QUEUE:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR:
            case CONSTR_NOCAF:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = true;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case TVAR:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = true;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
                size = ap_stack_sizeW((StgAP_STACK *)p);
                break;

            case ARR_WORDS:
                prim = true;
                size = arr_words_sizeW((StgArrBytes*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN_CLEAN:
            case MUT_ARR_PTRS_FROZEN_DIRTY:
                prim = true;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case SMALL_MUT_ARR_PTRS_CLEAN:
            case SMALL_MUT_ARR_PTRS_DIRTY:
            case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
            case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
                prim = true;
                size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
                break;

            case TSO:
                prim = true;
#if defined(PROFILING)
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = sizeofW(StgTSO);
                    break;
                } else {
                    // Skip this TSO and move on to the next object
                    p += sizeofW(StgTSO);
                    continue;
                }
#else
                size = sizeofW(StgTSO);
                break;
#endif

            case STACK:
                prim = true;
#if defined(PROFILING)
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = stack_sizeW((StgStack*)p);
                    break;
                } else {
                    // Skip this STACK and move on to the next object
                    p += stack_sizeW((StgStack*)p);
                    continue;
                }
#else
                size = stack_sizeW((StgStack*)p);
                break;
#endif

            case TREC_CHUNK:
                prim = true;
                size = sizeofW(StgTRecChunk);
                break;

            case COMPACT_NFDATA:
                barf("heapCensus, found compact object in the wrong list");
                break;

            default:
                barf("heapCensus, unknown object: %d", info->type);
            }

            heapProfObject(census,(StgClosure*)p,size,prim);

            p += size;
        }
    }
}

void heapCensus (Time t)
{
    uint32_t g, n;
    Census *census;
    gen_workspace *ws;

    census = &censuses[era];
    census->time = mut_user_time_until(t);

    // calculate retainer sets if necessary
#if defined(PROFILING)
    if (doingRetainerProfiling()) {
        retainerProfile();
    }
#endif

#if defined(PROFILING)
    stat_startHeapCensus();
#endif

    // Traverse the heap, collecting the census info
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        heapCensusChain( census, generations[g].blocks );
        // Are we interested in large objects?  might be
        // confusing to include the stack in a heap profile.
        heapCensusChain( census, generations[g].large_objects );
        heapCensusCompactList ( census, generations[g].compact_objects );

        for (n = 0; n < n_capabilities; n++) {
            ws = &gc_threads[n]->gens[g];
            heapCensusChain(census, ws->todo_bd);
            heapCensusChain(census, ws->part_list);
            heapCensusChain(census, ws->scavd_list);
        }
    }

    // dump out the census info
#if defined(PROFILING)
    // We can't generate any info for LDV profiling until
    // the end of the run...
    if (!doingLDVProfiling())
        dumpCensus( census );
#else
    dumpCensus( census );
#endif

    // free our storage, unless we're keeping all the census info for
    // future restriction by biography.
#if defined(PROFILING)
    if (RtsFlags.ProfFlags.bioSelector == NULL)
    {
        freeEra(census);
        census->hash = NULL;
        census->arena = NULL;
    }
#endif

    // we're into the next time period now
    nextEra();

#if defined(PROFILING)
    stat_endHeapCensus();
#endif
}