/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2003
 *
 * Support for heap profiling
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "RtsUtils.h"
#include "Profiling.h"
#include "ProfHeap.h"
#include "Stats.h"
#include "Hash.h"
#include "RetainerProfile.h"
#include "Arena.h"
#include "Printer.h"

#include <string.h>
/* -----------------------------------------------------------------------------
 * era stores the current time period.  It is the same as the
 * number of censuses that have been performed.
 *
 * RESTRICTION:
 *   era must fit in LDV_SHIFT (15 or 30) bits.
 * Invariants:
 *   era is initialized to 1 in initHeapProfiling().
 *
 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
 * When era reaches max_era, the profiling stops because a closure can
 * store only up to (max_era - 1) as its creation or last use time.
 * -------------------------------------------------------------------------- */
unsigned int era;
static nat max_era;
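
// For illustration (assuming the smaller of the two layouts, where
// LDV_SHIFT is 15): max_era is 1 << 15 == 32768, so at most 32767
// censuses can be taken before a closure's creation or last-use field
// would overflow.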

/* -----------------------------------------------------------------------------
 * Counters
 *
 * For most heap profiles each closure identity gets a simple count
 * of live words in the heap at each census.  However, if we're
 * selecting by biography, then we have to keep the various
 * lag/drag/void counters for each identity.
 * -------------------------------------------------------------------------- */
typedef struct _counter {
    void *identity;
    union {
        nat resid;
        struct {
            int prim;       // total size of 'inherently used' closures
            int not_used;   // total size of 'never used' closures
            int used;       // total size of 'used at least once' closures
            int void_total; // current total size of 'destroyed without being used' closures
            int drag_total; // current total size of 'used at least once and waiting to die'
        } ldv;
    } c;
    struct _counter *next;
} counter;

STATIC_INLINE void
initLDVCtr( counter *ctr )
{
    ctr->c.ldv.prim = 0;
    ctr->c.ldv.not_used = 0;
    ctr->c.ldv.used = 0;
    ctr->c.ldv.void_total = 0;
    ctr->c.ldv.drag_total = 0;
}

typedef struct {
    double      time;  // the time in MUT time when the census is made
    HashTable * hash;
    counter   * ctrs;
    Arena     * arena;

    // for LDV profiling, when just displaying by LDV
    int prim;
    int not_used;
    int used;
    int void_total;
    int drag_total;
} Census;
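
// Note that all of the counts in a counter and in a Census are measured
// in words; dumpCensus() converts them to bytes (multiplying by
// sizeof(W_)) when it writes the .hp file.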

static Census *censuses = NULL;
static nat n_censuses = 0;

#ifdef PROFILING
static void aggregateCensusInfo( void );
#endif

static void dumpCensus( Census *census );

static rtsBool closureSatisfiesConstraints( StgClosure* p );

/* ----------------------------------------------------------------------------
   Closure Type Profiling
   ------------------------------------------------------------------------- */

#ifndef PROFILING
static char *type_names[] = {
    "INVALID_OBJECT",
    "CONSTR",
    "CONSTR_1_0",
    "CONSTR_0_1",
    "CONSTR_2_0",
    "CONSTR_1_1",
    "CONSTR_0_2",
    "CONSTR_STATIC",
    "CONSTR_NOCAF_STATIC",
    "FUN",
    "FUN_1_0",
    "FUN_0_1",
    "FUN_2_0",
    "FUN_1_1",
    "FUN_0_2",
    "FUN_STATIC",
    "THUNK",
    "THUNK_1_0",
    "THUNK_0_1",
    "THUNK_2_0",
    "THUNK_1_1",
    "THUNK_0_2",
    "THUNK_STATIC",
    "THUNK_SELECTOR",
    "BCO",
    "AP",
    "PAP",
    "AP_STACK",
    "IND",
    "IND_OLDGEN",
    "IND_PERM",
    "IND_OLDGEN_PERM",
    "IND_STATIC",
    "RET_BCO",
    "RET_SMALL",
    "RET_BIG",
    "RET_DYN",
    "RET_FUN",
    "UPDATE_FRAME",
    "CATCH_FRAME",
    "STOP_FRAME",
    "CAF_BLACKHOLE",
    "BLACKHOLE",
    "MVAR_CLEAN",
    "MVAR_DIRTY",
    "ARR_WORDS",
    "MUT_ARR_PTRS_CLEAN",
    "MUT_ARR_PTRS_DIRTY",
    "MUT_ARR_PTRS_FROZEN0",
    "MUT_ARR_PTRS_FROZEN",
    "MUT_VAR_CLEAN",
    "MUT_VAR_DIRTY",
    "WEAK",
    "STABLE_NAME",
    "TSO",
    "BLOCKED_FETCH",
    "FETCH_ME",
    "FETCH_ME_BQ",
    "RBH",
    "REMOTE_REF",
    "TVAR_WATCH_QUEUE",
    "INVARIANT_CHECK_QUEUE",
    "ATOMIC_INVARIANT",
    "TVAR",
    "TREC_CHUNK",
    "TREC_HEADER",
    "ATOMICALLY_FRAME",
    "CATCH_RETRY_FRAME",
    "CATCH_STM_FRAME",
    "WHITEHOLE",
    "N_CLOSURE_TYPES"
};
#endif

/* ----------------------------------------------------------------------------
 * Find the "closure identity", which is a unique pointer representing
 * the band to which this closure's heap space is attributed in the
 * heap profile.
 * ------------------------------------------------------------------------- */
static void *
closureIdentity( StgClosure *p )
{
    switch (RtsFlags.ProfFlags.doHeapProfile) {

#ifdef PROFILING
    case HEAP_BY_CCS:
        return p->header.prof.ccs;
    case HEAP_BY_MOD:
        return p->header.prof.ccs->cc->module;
    case HEAP_BY_DESCR:
        return GET_PROF_DESC(get_itbl(p));
    case HEAP_BY_TYPE:
        return GET_PROF_TYPE(get_itbl(p));
    case HEAP_BY_RETAINER:
        // AFAIK, the only closures in the heap which might not have a
        // valid retainer set are DEAD_WEAK closures.
        if (isRetainerSetFieldValid(p))
            return retainerSetOf(p);
        else
            return NULL;

#else
    case HEAP_BY_CLOSURE_TYPE:
    {
        StgInfoTable *info;
        info = get_itbl(p);
        switch (info->type) {
        case CONSTR:
        case CONSTR_1_0:
        case CONSTR_0_1:
        case CONSTR_2_0:
        case CONSTR_1_1:
        case CONSTR_0_2:
        case CONSTR_STATIC:
        case CONSTR_NOCAF_STATIC:
            return GET_CON_DESC(itbl_to_con_itbl(info));
        default:
            return type_names[info->type];
        }
    }

#endif
    default:
        barf("closureIdentity");
    }
}

/* --------------------------------------------------------------------------
 * Profiling type predicates
 * ----------------------------------------------------------------------- */
#ifdef PROFILING
STATIC_INLINE rtsBool
doingLDVProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
            || RtsFlags.ProfFlags.bioSelector != NULL);
}

STATIC_INLINE rtsBool
doingRetainerProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
            || RtsFlags.ProfFlags.retainerSelector != NULL);
}
#endif /* PROFILING */

// Processes a closure 'c' being destroyed whose size is 'size'.
// Make sure that LDV_recordDead() is not invoked on 'inherently used' closures
// such as TSO; they should not be involved in computing dragNew or voidNew.
//
// Even though era is checked in both LdvCensusForDead() and
// LdvCensusKillAll(), we still need to make sure that era is > 0 because
// LDV_recordDead() may be called from elsewhere in the runtime system, e.g.
// when a thunk is replaced by an indirection object.
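//
// A sketch of how the LDV word is consulted below (the masks themselves
// are defined in the RTS headers): the state bits say whether the closure
// has been used since creation, and the era fields say when it was created
// and last used, e.g.
//
//   nat create_t = (LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT; // creation era
//   nat use_t    =  LDVW(c) & LDV_LAST_MASK;                 // last-use era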

#ifdef PROFILING
void
LDV_recordDead( StgClosure *c, nat size )
{
    void *id;
    nat t;
    counter *ctr;

    if (era > 0 && closureSatisfiesConstraints(c)) {
        size -= sizeofW(StgProfHeader);
        ASSERT(LDVW(c) != 0);
        if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
            t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
            if (t < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t].void_total   += (int)size;
                    censuses[era].void_total -= (int)size;
                    ASSERT(censuses[t].void_total < censuses[t].not_used);
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.void_total += (int)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.void_total -= (int)size;
                }
            }
        } else {
            t = LDVW((c)) & LDV_LAST_MASK;
            if (t + 1 < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t+1].drag_total += size;
                    censuses[era].drag_total -= size;
                } else {
                    void *id;
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.drag_total += (int)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.drag_total -= (int)size;
                }
            }
        }
    }
}
#endif

/* --------------------------------------------------------------------------
 * Initialize censuses[era]
 * ----------------------------------------------------------------------- */

STATIC_INLINE void
initEra(Census *census)
{
    census->hash  = allocHashTable();
    census->ctrs  = NULL;
    census->arena = newArena();

    census->not_used   = 0;
    census->used       = 0;
    census->prim       = 0;
    census->void_total = 0;
    census->drag_total = 0;
}

STATIC_INLINE void
freeEra(Census *census)
{
    if (RtsFlags.ProfFlags.bioSelector != NULL)
        // when bioSelector==NULL, these are freed in heapCensus()
    {
        arenaFree(census->arena);
        freeHashTable(census->hash, NULL);
    }
}

/* --------------------------------------------------------------------------
 * Increases era by 1 and initializes censuses[era].
 * Reallocates censuses[] and doubles its size if needed.
 * ----------------------------------------------------------------------- */

static void
nextEra( void )
{
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era++;

        if (era == max_era) {
            errorBelch("maximum number of censuses reached; use +RTS -i to reduce");
            stg_exit(EXIT_FAILURE);
        }

        if (era == n_censuses) {
            n_censuses *= 2;
            censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
                                       "nextEra");
        }
    }
#endif /* PROFILING */

    initEra( &censuses[era] );
}

/* ----------------------------------------------------------------------------
 * Heap profiling by info table
 * ------------------------------------------------------------------------- */

#if !defined(PROFILING)
FILE *hp_file;
static char *hp_filename;

void initProfiling1 (void)
{
}

void freeProfiling1 (void)
{
}

void initProfiling2 (void)
{
    char *prog;

    prog = stgMallocBytes(strlen(prog_name) + 1, "initProfiling2");
    strcpy(prog, prog_name);
#ifdef mingw32_HOST_OS
    // on Windows, drop the .exe suffix if there is one
    {
        char *suff;
        suff = strrchr(prog,'.');
        if (suff != NULL && !strcmp(suff,".exe")) {
            *suff = '\0';
        }
    }
#endif

    if (RtsFlags.ProfFlags.doHeapProfile) {
        /* Initialise the log file name */
        hp_filename = stgMallocBytes(strlen(prog) + 6, "hpFileName");
        sprintf(hp_filename, "%s.hp", prog);

        /* open the log file */
        if ((hp_file = fopen(hp_filename, "w")) == NULL) {
            debugBelch("Can't open profiling report file %s\n",
                       hp_filename);
            RtsFlags.ProfFlags.doHeapProfile = 0;
            return;
        }
    }

    stgFree(prog);

    initHeapProfiling();
}

void endProfiling( void )
{
    endHeapProfiling();
}
#endif /* !PROFILING */

static void
printSample(rtsBool beginSample, StgDouble sampleValue)
{
    StgDouble fractionalPart, integralPart;
    fractionalPart = modf(sampleValue, &integralPart);
    fprintf(hp_file, "%s %" FMT_Word64 ".%02" FMT_Word64 "\n",
            (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
            (StgWord64)integralPart, (StgWord64)(fractionalPart * 100));
}
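
// For example, printSample(rtsTrue, 1.5) writes the line "BEGIN_SAMPLE 1.50"
// to the .hp file, and printSample(rtsFalse, 1.5) writes the matching
// "END_SAMPLE 1.50" line that closes the census.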

/* --------------------------------------------------------------------------
 * Initialize the heap profiler
 * ----------------------------------------------------------------------- */
nat
initHeapProfiling(void)
{
    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return 0;
    }

#ifdef PROFILING
    if (doingLDVProfiling() && doingRetainerProfiling()) {
        errorBelch("cannot mix -hb and -hr");
        stg_exit(EXIT_FAILURE);
    }
#endif

    // We only count eras if we're doing LDV profiling.  Otherwise era
    // is fixed at zero.
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era = 1;
    } else
#endif
    {
        era = 0;
    }

    // max_era = 2^LDV_SHIFT
    max_era = 1 << LDV_SHIFT;

    n_censuses = 32;
    censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");

    initEra( &censuses[era] );

    /* initProfilingLogFile(); */
    fprintf(hp_file, "JOB \"%s", prog_name);

#ifdef PROFILING
    {
        int count;
        for(count = 1; count < prog_argc; count++)
            fprintf(hp_file, " %s", prog_argv[count]);
        fprintf(hp_file, " +RTS");
        for(count = 0; count < rts_argc; count++)
            fprintf(hp_file, " %s", rts_argv[count]);
    }
#endif /* PROFILING */

    fprintf(hp_file, "\"\n" );

    fprintf(hp_file, "DATE \"%s\"\n", time_str());

    fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
    fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");

    printSample(rtsTrue, 0);
    printSample(rtsFalse, 0);

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        initRetainerProfiling();
    }
#endif

    return 0;
}
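
// The code above emits a header like the following at the top of the .hp
// file (program name, arguments, and date are illustrative):
//
//   JOB "myprog arg1 +RTS -hc"
//   DATE "Thu Jun 12 12:00 2008"
//   SAMPLE_UNIT "seconds"
//   VALUE_UNIT "bytes"
//   BEGIN_SAMPLE 0.00
//   END_SAMPLE 0.00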

void
endHeapProfiling(void)
{
    StgDouble seconds;

    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return;
    }

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        endRetainerProfiling();
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        LdvCensusKillAll();
        aggregateCensusInfo();
        for (t = 1; t < era; t++) {
            dumpCensus( &censuses[t] );
        }
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        for (t = 1; t <= era; t++) {
            freeEra( &censuses[t] );
        }
    } else {
        freeEra( &censuses[0] );
    }
#else
    freeEra( &censuses[0] );
#endif

    stgFree(censuses);

    seconds = mut_user_time();
    printSample(rtsTrue, seconds);
    printSample(rtsFalse, seconds);
    fclose(hp_file);
}

#ifdef PROFILING
static size_t
buf_append(char *p, const char *q, char *end)
{
    size_t m;

    for (m = 0; p < end; p++, q++, m++) {
        *p = *q;
        if (*q == '\0') { break; }
    }
    return m;
}

static void
fprint_ccs(FILE *fp, CostCentreStack *ccs, nat max_length)
{
    char buf[max_length+1], *p, *buf_end;

    // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
    if (ccs == CCS_MAIN) {
        fprintf(fp, "MAIN");
        return;
    }

    fprintf(fp, "(%ld)", ccs->ccsID);

    p = buf;
    buf_end = buf + max_length + 1;

    // keep printing components of the stack until we run out of space
    // in the buffer.  If we run out of space, end with "...".
    for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {

        // CAF cost centres print as M.CAF, but we leave the module
        // name out of all the others to save space.
        if (!strcmp(ccs->cc->label,"CAF")) {
            p += buf_append(p, ccs->cc->module, buf_end);
            p += buf_append(p, ".CAF", buf_end);
        } else {
            p += buf_append(p, ccs->cc->label, buf_end);
            if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
                p += buf_append(p, "/", buf_end);
            }
        }

        if (p >= buf_end) {
            sprintf(buf+max_length-4, "...");
            break;
        }
    }
    fprintf(fp, "%s", buf);
}
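
// Illustrative output (cost-centre names assumed): for a stack
// g -> f -> Main.CAF whose ccsID is 42, fprint_ccs prints
//
//   (42)g/f/Main.CAF
//
// truncating with "..." if the stack doesn't fit in max_length characters.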

rtsBool
strMatchesSelector( char* str, char* sel )
{
    char* p;
    // debugBelch("str_matches_selector %s %s\n", str, sel);
    while (1) {
        // Compare str against wherever we've got to in sel.
        p = str;
        while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
            p++; sel++;
        }
        // Match if all of str used and have reached the end of a sel fragment.
        if (*p == '\0' && (*sel == ',' || *sel == '\0'))
            return rtsTrue;

        // No match.  Advance sel to the start of the next elem.
        while (*sel != ',' && *sel != '\0') sel++;
        if (*sel == ',') sel++;

        // Run out of sel?
        if (*sel == '\0') return rtsFalse;
    }
}
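
// For example, strMatchesSelector("drag", "lag,drag,void") returns rtsTrue
// because "drag" matches the second comma-separated fragment, while
// strMatchesSelector("use", "lag,drag,void") returns rtsFalse.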

#endif /* PROFILING */

/* -----------------------------------------------------------------------------
 * Figure out whether a closure should be counted in this census, by
 * testing against all the specified constraints.
 * -------------------------------------------------------------------------- */
static rtsBool
closureSatisfiesConstraints( StgClosure* p )
{
#if !defined(PROFILING)
    (void)p; /* keep gcc -Wall happy */
    return rtsTrue;
#else
    rtsBool b;

    // The CCS has a selected field to indicate whether this closure is
    // deselected by not being mentioned in the module, CC, or CCS
    // selectors.
    if (!p->header.prof.ccs->selected) {
        return rtsFalse;
    }

    if (RtsFlags.ProfFlags.descrSelector) {
        b = strMatchesSelector( (GET_PROF_DESC(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.descrSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.typeSelector) {
        b = strMatchesSelector( (GET_PROF_TYPE(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.typeSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.retainerSelector) {
        RetainerSet *rs;
        nat i;
        // We must check that the retainer set is valid here.  One
        // reason it might not be valid is if this closure is a
        // newly deceased weak pointer (i.e. a DEAD_WEAK), since
        // these aren't reached by the retainer profiler's traversal.
        if (isRetainerSetFieldValid((StgClosure *)p)) {
            rs = retainerSetOf((StgClosure *)p);
            if (rs != NULL) {
                for (i = 0; i < rs->num; i++) {
                    b = strMatchesSelector( rs->element[i]->cc->label,
                                            RtsFlags.ProfFlags.retainerSelector );
                    if (b) return rtsTrue;
                }
            }
        }
        return rtsFalse;
    }
    return rtsTrue;
#endif /* PROFILING */
}
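
// Illustration (hypothetical command line; the exact flag syntax is
// documented in the GHC user's guide): running a profiled program as
//
//   myprog +RTS -hc -hbdrag,void
//
// breaks the profile down by cost-centre stack while restricting each
// census to closures whose biography is drag or void; the "drag,void"
// string becomes RtsFlags.ProfFlags.bioSelector, which dumpCensus()
// matches against with strMatchesSelector().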

/* -----------------------------------------------------------------------------
 * Aggregate the heap census info for biographical profiling
 * -------------------------------------------------------------------------- */
#ifdef PROFILING
static void
aggregateCensusInfo( void )
{
    HashTable *acc;
    nat t;
    counter *c, *d, *ctrs;
    Arena *arena;

    if (!doingLDVProfiling()) return;

    // Aggregate the LDV counters when displaying by biography.
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        int void_total, drag_total;

        // Now we compute void_total and drag_total for each census.
        // After the program has finished, the void_total field of
        // each census contains the count of words that were *created*
        // in this era and were eventually void.  Conversely, if a
        // void closure was destroyed in this era, it will be
        // represented by a negative count of words in void_total.
        //
        // To get the count of live words that are void at each
        // census, just propagate the void_total count forwards:

        void_total = 0;
        drag_total = 0;
        for (t = 1; t < era; t++) { // note: start at 1, not 0
            void_total += censuses[t].void_total;
            drag_total += censuses[t].drag_total;
            censuses[t].void_total = void_total;
            censuses[t].drag_total = drag_total;

            ASSERT( censuses[t].void_total <= censuses[t].not_used );
            // should be true because: void_total is the count of
            // live words that are void at this census, which *must*
            // be less than the number of live words that have not
            // been used yet.

            ASSERT( censuses[t].drag_total <= censuses[t].used );
            // similar reasoning as above.
        }

        return;
    }

    // otherwise... we're doing a heap profile that is restricted to
    // some combination of lag, drag, void or use.  We've kept all the
    // census info for all censuses so far, but we still need to
    // aggregate the counters forwards.

    arena = newArena();
    acc = allocHashTable();
    ctrs = NULL;

    for (t = 1; t < era; t++) {

        // first look through all the counters we're aggregating
        for (c = ctrs; c != NULL; c = c->next) {
            // if one of the totals is non-zero, then this closure
            // type must be present in the heap at this census time...
            d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);

            if (d == NULL) {
                // if this closure identity isn't present in the
                // census for this time period, then our running
                // totals *must* be zero.
                ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);

                // debugCCS(c->identity);
                // debugBelch(" census=%d void_total=%d drag_total=%d\n",
                //            t, c->c.ldv.void_total, c->c.ldv.drag_total);
            } else {
                d->c.ldv.void_total += c->c.ldv.void_total;
                d->c.ldv.drag_total += c->c.ldv.drag_total;
                c->c.ldv.void_total = d->c.ldv.void_total;
                c->c.ldv.drag_total = d->c.ldv.drag_total;

                ASSERT( c->c.ldv.void_total >= 0 );
                ASSERT( c->c.ldv.drag_total >= 0 );
            }
        }

        // now look through the counters in this census to find new ones
        for (c = censuses[t].ctrs; c != NULL; c = c->next) {
            d = lookupHashTable(acc, (StgWord)c->identity);
            if (d == NULL) {
                d = arenaAlloc( arena, sizeof(counter) );
                initLDVCtr(d);
                insertHashTable( acc, (StgWord)c->identity, d );
                d->identity = c->identity;
                d->next = ctrs;
                ctrs = d;
                d->c.ldv.void_total = c->c.ldv.void_total;
                d->c.ldv.drag_total = c->c.ldv.drag_total;
            }
            ASSERT( c->c.ldv.void_total >= 0 );
            ASSERT( c->c.ldv.drag_total >= 0 );
        }
    }

    freeHashTable(acc, NULL);
    arenaFree(arena);
}
#endif

/* -----------------------------------------------------------------------------
 * Print out the results of a heap census.
 * -------------------------------------------------------------------------- */
static void
dumpCensus( Census *census )
{
    counter *ctr;
    int count;

    printSample(rtsTrue, census->time);

#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        fprintf(hp_file, "VOID\t%lu\n",
                (unsigned long)(census->void_total) * sizeof(W_));
        fprintf(hp_file, "LAG\t%lu\n",
                (unsigned long)(census->not_used - census->void_total) * sizeof(W_));
        fprintf(hp_file, "USE\t%lu\n",
                (unsigned long)(census->used - census->drag_total) * sizeof(W_));
        fprintf(hp_file, "INHERENT_USE\t%lu\n",
                (unsigned long)(census->prim) * sizeof(W_));
        fprintf(hp_file, "DRAG\t%lu\n",
                (unsigned long)(census->drag_total) * sizeof(W_));
        printSample(rtsFalse, census->time);
        return;
    }
#endif

    for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {

#ifdef PROFILING
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            count = 0;
            if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
            if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.drag_total;
            if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.void_total;
            if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
        } else
#endif
        {
            count = ctr->c.resid;
        }

        ASSERT( count >= 0 );

        if (count == 0) continue;

#if !defined(PROFILING)
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CLOSURE_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        }
#endif

#ifdef PROFILING
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CCS:
            fprint_ccs(hp_file, (CostCentreStack *)ctr->identity,
                       RtsFlags.ProfFlags.ccsLength);
            break;
        case HEAP_BY_MOD:
        case HEAP_BY_DESCR:
        case HEAP_BY_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        case HEAP_BY_RETAINER:
        {
            RetainerSet *rs = (RetainerSet *)ctr->identity;

            // it might be the distinguished retainer set rs_MANY:
            if (rs == &rs_MANY) {
                fprintf(hp_file, "MANY");
                break;
            }

            // Mark this retainer set by negating its id, because it
            // has appeared in at least one census.  We print the
            // values of all such retainer sets into the log file at
            // the end.  A retainer set may exist but not feature in
            // any censuses if it arose as the intermediate retainer
            // set for some closure during retainer set calculation.
            if (rs->id > 0)
                rs->id = -(rs->id);

            // report in the unit of bytes: * sizeof(StgWord)
            printRetainerSetShort(hp_file, rs);
            break;
        }
        default:
            barf("dumpCensus; doHeapProfile");
        }
#endif

        fprintf(hp_file, "\t%lu\n", (unsigned long)count * sizeof(W_));
    }

    printSample(rtsFalse, census->time);
}
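
// Put together, a single census in the .hp file looks something like this
// (identities and sizes are illustrative, for a -hc profile; each identity
// is followed by a tab and its residency in bytes):
//
//   BEGIN_SAMPLE 0.25
//   (42)g/f/Main.CAF	2048
//   END_SAMPLE 0.25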

/* -----------------------------------------------------------------------------
 * Code to perform a heap census.
 * -------------------------------------------------------------------------- */
static void
heapCensusChain( Census *census, bdescr *bd )
{
    StgPtr p;
    StgInfoTable *info;
    void *identity;
    nat size;
    counter *ctr;
    nat real_size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        // HACK: ignore pinned blocks, because they contain gaps.
        // It's not clear exactly what we'd like to do here, since we
        // can't tell which objects in the block are actually alive.
        // Perhaps the whole block should be counted as SYSTEM memory.
        if (bd->flags & BF_PINNED) {
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case IND_PERM:
            case IND_OLDGEN:
            case IND_OLDGEN_PERM:
            case CAF_BLACKHOLE:
            case BLACKHOLE:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case WEAK:
            case STABLE_NAME:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
                size = ap_stack_sizeW((StgAP_STACK *)p);
                break;

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW((StgArrWords*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
#ifdef PROFILING
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = tso_sizeW((StgTSO *)p);
                    break;
                } else {
                    // Skip this TSO and move on to the next object
                    p += tso_sizeW((StgTSO *)p);
                    continue;
                }
#else
                size = tso_sizeW((StgTSO *)p);
                break;
#endif

            case TREC_HEADER:
                prim = rtsTrue;
                size = sizeofW(StgTRecHeader);
                break;

            case TVAR_WATCH_QUEUE:
                prim = rtsTrue;
                size = sizeofW(StgTVarWatchQueue);
                break;

            case INVARIANT_CHECK_QUEUE:
                prim = rtsTrue;
                size = sizeofW(StgInvariantCheckQueue);
                break;

            case ATOMIC_INVARIANT:
                prim = rtsTrue;
                size = sizeofW(StgAtomicInvariant);
                break;

            case TVAR:
                prim = rtsTrue;
                size = sizeofW(StgTVar);
                break;

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("heapCensus, unknown object: %d", info->type);
            }

            identity = NULL;

#ifdef PROFILING
            // subtract the profiling overhead
            real_size = size - sizeofW(StgProfHeader);
#else
            real_size = size;
#endif

            if (closureSatisfiesConstraints((StgClosure*)p)) {
#ifdef PROFILING
                if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
                    if (prim)
                        census->prim += real_size;
                    else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                        census->not_used += real_size;
                    else
                        census->used += real_size;
                } else
#endif
                {
                    identity = closureIdentity((StgClosure *)p);

                    if (identity != NULL) {
                        ctr = lookupHashTable( census->hash, (StgWord)identity );
                        if (ctr != NULL) {
#ifdef PROFILING
                            if (RtsFlags.ProfFlags.bioSelector != NULL) {
                                if (prim)
                                    ctr->c.ldv.prim += real_size;
                                else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                                    ctr->c.ldv.not_used += real_size;
                                else
                                    ctr->c.ldv.used += real_size;
                            } else
#endif
                            {
                                ctr->c.resid += real_size;
                            }
                        } else {
                            ctr = arenaAlloc( census->arena, sizeof(counter) );
                            initLDVCtr(ctr);
                            insertHashTable( census->hash, (StgWord)identity, ctr );
                            ctr->identity = identity;
                            ctr->next = census->ctrs;
                            census->ctrs = ctr;

#ifdef PROFILING
                            if (RtsFlags.ProfFlags.bioSelector != NULL) {
                                if (prim)
                                    ctr->c.ldv.prim = real_size;
                                else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                                    ctr->c.ldv.not_used = real_size;
                                else
                                    ctr->c.ldv.used = real_size;
                            } else
#endif
                            {
                                ctr->c.resid = real_size;
                            }
                        }
                    }
                }
            }

            p += size;
        }
    }
}

void
heapCensus( void )
{
    nat g, s;
    Census *census;

    census = &censuses[era];
    census->time = mut_user_time();

    // calculate retainer sets if necessary
#ifdef PROFILING
    if (doingRetainerProfiling()) {
        retainerProfile();
    }
#endif

#ifdef PROFILING
    stat_startHeapCensus();
#endif

    // Traverse the heap, collecting the census info
    if (RtsFlags.GcFlags.generations == 1) {
        heapCensusChain( census, g0s0->blocks );
    } else {
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            for (s = 0; s < generations[g].n_steps; s++) {
                heapCensusChain( census, generations[g].steps[s].blocks );
                // Are we interested in large objects?  might be
                // confusing to include the stack in a heap profile.
                heapCensusChain( census, generations[g].steps[s].large_objects );
            }
        }
    }

    // dump out the census info
#ifdef PROFILING
    // We can't generate any info for LDV profiling until
    // the end of the run...
    if (!doingLDVProfiling())
        dumpCensus( census );
#else
    dumpCensus( census );
#endif

    // free our storage, unless we're keeping all the census info for
    // future restriction by biography.
#ifdef PROFILING
    if (RtsFlags.ProfFlags.bioSelector == NULL)
    {
        freeHashTable( census->hash, NULL/* don't free the elements */ );
        arenaFree( census->arena );
        census->hash = NULL;
        census->arena = NULL;
    }
#endif

    // we're into the next time period now
    nextEra();

#ifdef PROFILING
    stat_endHeapCensus();
#endif
}