FIX BUILD (non-profiling RTS was broken in previous patch)
[ghc.git] / rts / ProfHeap.c
/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2003
 *
 * Support for heap profiling
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "Profiling.h"
#include "ProfHeap.h"
#include "Stats.h"
#include "Hash.h"
#include "RetainerProfile.h"
#include "LdvProfile.h"
#include "Arena.h"
#include "Printer.h"

#include <string.h>
#include <stdlib.h>
#include <math.h>

/* -----------------------------------------------------------------------------
 * era stores the current time period.  It is the same as the
 * number of censuses that have been performed.
 *
 * RESTRICTION:
 *   era must be no longer than LDV_SHIFT (15 or 30) bits.
 * Invariants:
 *   era is initialized to 1 in initHeapProfiling().
 *
 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
 * When era reaches max_era, the profiling stops because a closure can
 * store only up to (max_era - 1) as its creation or last use time.
 * -------------------------------------------------------------------------- */
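// A rough sketch of the LDV word layout assumed below (LdvProfile.h has
// the definitive definitions): the creation era lives in the bits
// selected by LDV_CREATE_MASK (shifted down by LDV_SHIFT), a state bit
// (LDV_STATE_MASK) records whether the closure has been used since it
// was created, and the era of last use lives in the low bits selected
// by LDV_LAST_MASK.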
unsigned int era;
static nat max_era;

/* -----------------------------------------------------------------------------
 * Counters
 *
 * For most heap profiles each closure identity gets a simple count
 * of live words in the heap at each census.  However, if we're
 * selecting by biography, then we have to keep the various
 * lag/drag/void counters for each identity.
 * -------------------------------------------------------------------------- */
typedef struct _counter {
    void *identity;
    union {
        nat resid;
        struct {
            int prim;       // total size of 'inherently used' closures
            int not_used;   // total size of 'never used' closures
            int used;       // total size of 'used at least once' closures
            int void_total; // current total size of 'destroyed without being used' closures
            int drag_total; // current total size of 'used at least once and waiting to die'
        } ldv;
    } c;
    struct _counter *next;
} counter;

STATIC_INLINE void
initLDVCtr( counter *ctr )
{
    ctr->c.ldv.prim = 0;
    ctr->c.ldv.not_used = 0;
    ctr->c.ldv.used = 0;
    ctr->c.ldv.void_total = 0;
    ctr->c.ldv.drag_total = 0;
}

typedef struct {
    double      time;  // the time in MUT time when the census is made
    HashTable * hash;
    counter   * ctrs;
    Arena     * arena;

    // for LDV profiling, when just displaying by LDV
    int prim;
    int not_used;
    int used;
    int void_total;
    int drag_total;
} Census;
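
// Counters are allocated from census->arena and also threaded onto the
// census->ctrs list, so dumpCensus() and aggregateCensusInfo() can
// iterate over them without walking the hash table.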

static Census *censuses = NULL;
static nat n_censuses = 0;

#ifdef PROFILING
static void aggregateCensusInfo( void );
#endif

static void dumpCensus( Census *census );

/* ----------------------------------------------------------------------------
   Closure Type Profiling
   ------------------------------------------------------------------------- */

static char *type_names[] = {
    "INVALID_OBJECT",
    "CONSTR",
    "CONSTR_1_0",
    "CONSTR_0_1",
    "CONSTR_2_0",
    "CONSTR_1_1",
    "CONSTR_0_2",
    "CONSTR_STATIC",
    "CONSTR_NOCAF_STATIC",
    "FUN",
    "FUN_1_0",
    "FUN_0_1",
    "FUN_2_0",
    "FUN_1_1",
    "FUN_0_2",
    "FUN_STATIC",
    "THUNK",
    "THUNK_1_0",
    "THUNK_0_1",
    "THUNK_2_0",
    "THUNK_1_1",
    "THUNK_0_2",
    "THUNK_STATIC",
    "THUNK_SELECTOR",
    "BCO",
    "AP",
    "PAP",
    "AP_STACK",
    "IND",
    "IND_OLDGEN",
    "IND_PERM",
    "IND_OLDGEN_PERM",
    "IND_STATIC",
    "RET_BCO",
    "RET_SMALL",
    "RET_BIG",
    "RET_DYN",
    "RET_FUN",
    "UPDATE_FRAME",
    "CATCH_FRAME",
    "STOP_FRAME",
    "CAF_BLACKHOLE",
    "BLACKHOLE",
    "SE_BLACKHOLE",
    "SE_CAF_BLACKHOLE",
    "MVAR",
    "ARR_WORDS",
    "MUT_ARR_PTRS_CLEAN",
    "MUT_ARR_PTRS_DIRTY",
    "MUT_ARR_PTRS_FROZEN0",
    "MUT_ARR_PTRS_FROZEN",
    "MUT_VAR_CLEAN",
    "MUT_VAR_DIRTY",
    "WEAK",
    "STABLE_NAME",
    "TSO",
    "BLOCKED_FETCH",
    "FETCH_ME",
    "FETCH_ME_BQ",
    "RBH",
    "EVACUATED",
    "REMOTE_REF",
    "TVAR_WATCH_QUEUE",
    "INVARIANT_CHECK_QUEUE",
    "ATOMIC_INVARIANT",
    "TVAR",
    "TREC_CHUNK",
    "TREC_HEADER",
    "ATOMICALLY_FRAME",
    "CATCH_RETRY_FRAME",
    "CATCH_STM_FRAME",
    "N_CLOSURE_TYPES"
};

/* ----------------------------------------------------------------------------
 * Find the "closure identity", which is a unique pointer representing
 * the band to which this closure's heap space is attributed in the
 * heap profile.
 * ------------------------------------------------------------------------- */
STATIC_INLINE void *
closureIdentity( StgClosure *p )
{
    switch (RtsFlags.ProfFlags.doHeapProfile) {

#ifdef PROFILING
    case HEAP_BY_CCS:
        return p->header.prof.ccs;
    case HEAP_BY_MOD:
        return p->header.prof.ccs->cc->module;
    case HEAP_BY_DESCR:
        return get_itbl(p)->prof.closure_desc;
    case HEAP_BY_TYPE:
        return get_itbl(p)->prof.closure_type;
    case HEAP_BY_RETAINER:
        // AFAIK, the only closures in the heap which might not have a
        // valid retainer set are DEAD_WEAK closures.
        if (isRetainerSetFieldValid(p))
            return retainerSetOf(p);
        else
            return NULL;

#else
    case HEAP_BY_CLOSURE_TYPE:
    {
        StgInfoTable *info;
        info = get_itbl(p);
        switch (info->type) {
        case CONSTR:
        case CONSTR_1_0:
        case CONSTR_0_1:
        case CONSTR_2_0:
        case CONSTR_1_1:
        case CONSTR_0_2:
        case CONSTR_STATIC:
        case CONSTR_NOCAF_STATIC:
            return GET_CON_DESC(itbl_to_con_itbl(info));
        default:
            return type_names[info->type];
        }
    }

#endif
    default:
        barf("closureIdentity");
    }
}

/* --------------------------------------------------------------------------
 * Profiling type predicates
 * ----------------------------------------------------------------------- */
#ifdef PROFILING
STATIC_INLINE rtsBool
doingLDVProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
            || RtsFlags.ProfFlags.bioSelector != NULL);
}

STATIC_INLINE rtsBool
doingRetainerProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
            || RtsFlags.ProfFlags.retainerSelector != NULL);
}
#endif /* PROFILING */
// Processes a closure 'c' being destroyed whose size is 'size'.
// Make sure that LDV_recordDead() is not invoked on 'inherently used' closures
// such as TSO; they should not be involved in computing dragNew or voidNew.
//
// Even though era is checked in both LdvCensusForDead() and
// LdvCensusKillAll(), we still need to make sure that era is > 0 because
// LDV_recordDead() may be called from elsewhere in the runtime system.  E.g.,
// when a thunk is replaced by an indirection object.
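//
// Bookkeeping note: when a closure created in era t dies void, we add
// its size at censuses[t] and subtract it at censuses[era];
// aggregateCensusInfo() later turns these per-era deltas into running
// totals, so the closure ends up counted as void in every census from
// t to era-1.  The drag case below works the same way, starting at
// censuses[t+1].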

#ifdef PROFILING
void
LDV_recordDead( StgClosure *c, nat size )
{
    void *id;
    nat t;
    counter *ctr;

    if (era > 0 && closureSatisfiesConstraints(c)) {
        size -= sizeofW(StgProfHeader);
        ASSERT(LDVW(c) != 0);
        if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
            t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
            if (t < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t].void_total   += (int)size;
                    censuses[era].void_total -= (int)size;
                    ASSERT(censuses[t].void_total < censuses[t].not_used);
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.void_total += (int)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.void_total -= (int)size;
                }
            }
        } else {
            t = LDVW((c)) & LDV_LAST_MASK;
            if (t + 1 < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t+1].drag_total += size;
                    censuses[era].drag_total -= size;
                } else {
                    void *id;
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.drag_total += (int)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.drag_total -= (int)size;
                }
            }
        }
    }
}
#endif

/* --------------------------------------------------------------------------
 * Initialize censuses[era];
 * ----------------------------------------------------------------------- */

STATIC_INLINE void
initEra(Census *census)
{
    census->hash  = allocHashTable();
    census->ctrs  = NULL;
    census->arena = newArena();

    census->not_used   = 0;
    census->used       = 0;
    census->prim       = 0;
    census->void_total = 0;
    census->drag_total = 0;
}

STATIC_INLINE void
freeEra(Census *census)
{
    arenaFree(census->arena);
    freeHashTable(census->hash, NULL);
}

/* --------------------------------------------------------------------------
 * Increase era by 1 and initialize censuses[era].
 * Reallocates censuses[] and doubles its size if needed.
 * ----------------------------------------------------------------------- */

static void
nextEra( void )
{
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era++;

        if (era == max_era) {
            errorBelch("maximum number of censuses reached; use +RTS -i to reduce");
            stg_exit(EXIT_FAILURE);
        }

        if (era == n_censuses) {
            n_censuses *= 2;
            censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
                                       "nextEra");
        }
    }
#endif /* PROFILING */

    initEra( &censuses[era] );
}

/* ----------------------------------------------------------------------------
 * Heap profiling by info table
 * ------------------------------------------------------------------------- */

#if !defined(PROFILING)
FILE *hp_file;
static char *hp_filename;

void initProfiling1 (void)
{
}

void freeProfiling1 (void)
{
}

void initProfiling2 (void)
{
    if (RtsFlags.ProfFlags.doHeapProfile) {
        /* Initialise the log file name */
        hp_filename = stgMallocBytes(strlen(prog_name) + 6, "hpFileName");
        sprintf(hp_filename, "%s.hp", prog_name);

        /* open the log file */
        if ((hp_file = fopen(hp_filename, "w")) == NULL) {
            debugBelch("Can't open profiling report file %s\n",
                       hp_filename);
            RtsFlags.ProfFlags.doHeapProfile = 0;
            return;
        }
    }

    initHeapProfiling();
}

void endProfiling( void )
{
    endHeapProfiling();
}
#endif /* !PROFILING */

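// Emit a "BEGIN_SAMPLE <time>" or "END_SAMPLE <time>" line to the .hp
// file; e.g. printSample(rtsTrue, 1.5) prints "BEGIN_SAMPLE 1.50".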
static void
printSample(rtsBool beginSample, StgDouble sampleValue)
{
    StgDouble fractionalPart, integralPart;
    fractionalPart = modf(sampleValue, &integralPart);
    fprintf(hp_file, "%s %" FMT_Word64 ".%02" FMT_Word64 "\n",
            (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
            (StgWord64)integralPart, (StgWord64)(fractionalPart * 100));
}

/* --------------------------------------------------------------------------
 * Initialize the heap profiler
 * ----------------------------------------------------------------------- */
nat
initHeapProfiling(void)
{
    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return 0;
    }

#ifdef PROFILING
    if (doingLDVProfiling() && doingRetainerProfiling()) {
        errorBelch("cannot mix -hb and -hr");
        stg_exit(EXIT_FAILURE);
    }
#endif

    // we only count eras if we're doing LDV profiling.  Otherwise era
    // is fixed at zero.
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era = 1;
    } else
#endif
    {
        era = 0;
    }

    {   // max_era = 2^LDV_SHIFT
        nat p;
        max_era = 1;
        for (p = 0; p < LDV_SHIFT; p++)
            max_era *= 2;
    }
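    // e.g. with LDV_SHIFT == 15 this gives max_era == 32768, so eras
    // 1..32767 are representable in a closure's LDV word.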

    n_censuses = 32;
    censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");

    initEra( &censuses[era] );

    /* initProfilingLogFile(); */
    fprintf(hp_file, "JOB \"%s", prog_name);

#ifdef PROFILING
    {
        int count;
        for (count = 1; count < prog_argc; count++)
            fprintf(hp_file, " %s", prog_argv[count]);
        fprintf(hp_file, " +RTS");
        for (count = 0; count < rts_argc; count++)
            fprintf(hp_file, " %s", rts_argv[count]);
    }
#endif /* PROFILING */

    fprintf(hp_file, "\"\n");

    fprintf(hp_file, "DATE \"%s\"\n", time_str());

    fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
    fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");

    printSample(rtsTrue, 0);
    printSample(rtsFalse, 0);

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        initRetainerProfiling();
    }
#endif

    return 0;
}

void
endHeapProfiling(void)
{
    StgDouble seconds;

    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return;
    }

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        endRetainerProfiling();
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        LdvCensusKillAll();
        aggregateCensusInfo();
        for (t = 1; t < era; t++) {
            dumpCensus( &censuses[t] );
        }
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        for (t = 1; t <= era; t++) {
            freeEra( &censuses[t] );
        }
    } else {
        freeEra( &censuses[0] );
    }
#else
    freeEra( &censuses[0] );
#endif

    stgFree(censuses);

    seconds = mut_user_time();
    printSample(rtsTrue, seconds);
    printSample(rtsFalse, seconds);
    fclose(hp_file);
}

#ifdef PROFILING
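// Append the string 'q' to the buffer starting at 'p', never writing at
// or beyond 'end'.  Returns the number of characters copied, excluding
// the terminating NUL, so the caller can advance 'p' and overwrite the
// NUL on the next append.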
static size_t
buf_append(char *p, const char *q, char *end)
{
    int m;

    for (m = 0; p < end; p++, q++, m++) {
        *p = *q;
        if (*q == '\0') { break; }
    }
    return m;
}

static void
fprint_ccs(FILE *fp, CostCentreStack *ccs, nat max_length)
{
    char buf[max_length+1], *p, *buf_end;

    // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
    if (ccs == CCS_MAIN) {
        fprintf(fp, "MAIN");
        return;
    }

    fprintf(fp, "(%ld)", ccs->ccsID);

    p = buf;
    buf_end = buf + max_length + 1;

    // keep printing components of the stack until we run out of space
    // in the buffer.  If we run out of space, end with "...".
    for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {

        // CAF cost centres print as M.CAF, but we leave the module
        // name out of all the others to save space.
        if (!strcmp(ccs->cc->label,"CAF")) {
            p += buf_append(p, ccs->cc->module, buf_end);
            p += buf_append(p, ".CAF", buf_end);
        } else {
            p += buf_append(p, ccs->cc->label, buf_end);
            if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
                p += buf_append(p, "/", buf_end);
            }
        }

        if (p >= buf_end) {
            sprintf(buf+max_length-4, "...");
            break;
        }
    }
    fprintf(fp, "%s", buf);
}
#endif /* PROFILING */

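// Does 'str' match one of the comma-separated elements of 'sel'?  For
// example, strMatchesSelector("TSO", "TSO,PAP") gives rtsTrue, while
// strMatchesSelector("TS", "TSO,PAP") gives rtsFalse.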
rtsBool
strMatchesSelector( char* str, char* sel )
{
    char* p;
    // debugBelch("str_matches_selector %s %s\n", str, sel);
    while (1) {
        // Compare str against wherever we've got to in sel.
        p = str;
        while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
            p++; sel++;
        }
        // Match if all of str used and have reached the end of a sel fragment.
        if (*p == '\0' && (*sel == ',' || *sel == '\0'))
            return rtsTrue;

        // No match.  Advance sel to the start of the next elem.
        while (*sel != ',' && *sel != '\0') sel++;
        if (*sel == ',') sel++;

        /* Run out of sel ?? */
        if (*sel == '\0') return rtsFalse;
    }
}

/* -----------------------------------------------------------------------------
 * Figure out whether a closure should be counted in this census, by
 * testing against all the specified constraints.
 * -------------------------------------------------------------------------- */
rtsBool
closureSatisfiesConstraints( StgClosure* p )
{
#if !defined(PROFILING)
    (void)p;   /* keep gcc -Wall happy */
    return rtsTrue;
#else
    rtsBool b;

    // The CCS has a selected field to indicate whether this closure is
    // deselected by not being mentioned in the module, CC, or CCS
    // selectors.
    if (!p->header.prof.ccs->selected) {
        return rtsFalse;
    }

    if (RtsFlags.ProfFlags.descrSelector) {
        b = strMatchesSelector( (get_itbl((StgClosure *)p))->prof.closure_desc,
                                RtsFlags.ProfFlags.descrSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.typeSelector) {
        b = strMatchesSelector( (get_itbl((StgClosure *)p))->prof.closure_type,
                                RtsFlags.ProfFlags.typeSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.retainerSelector) {
        RetainerSet *rs;
        nat i;
        // We must check that the retainer set is valid here.  One
        // reason it might not be valid is if this closure is a
        // newly deceased weak pointer (i.e. a DEAD_WEAK), since
        // these aren't reached by the retainer profiler's traversal.
        if (isRetainerSetFieldValid((StgClosure *)p)) {
            rs = retainerSetOf((StgClosure *)p);
            if (rs != NULL) {
                for (i = 0; i < rs->num; i++) {
                    b = strMatchesSelector( rs->element[i]->cc->label,
                                            RtsFlags.ProfFlags.retainerSelector );
                    if (b) return rtsTrue;
                }
            }
        }
        return rtsFalse;
    }
    return rtsTrue;
#endif /* PROFILING */
}

/* -----------------------------------------------------------------------------
 * Aggregate the heap census info for biographical profiling
 * -------------------------------------------------------------------------- */
#ifdef PROFILING
static void
aggregateCensusInfo( void )
{
    HashTable *acc;
    nat t;
    counter *c, *d, *ctrs;
    Arena *arena;

    if (!doingLDVProfiling()) return;

    // Aggregate the LDV counters when displaying by biography.
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        int void_total, drag_total;

        // Now we compute void_total and drag_total for each census.
        // After the program has finished, the void_total field of
        // each census contains the count of words that were *created*
        // in this era and were eventually void.  Conversely, if a
        // void closure was destroyed in this era, it will be
        // represented by a negative count of words in void_total.
        //
        // To get the count of live words that are void at each
        // census, just propagate the void_total count forwards:
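        //
        // For example, if 10 words created in era 2 eventually die void
        // in era 5, LDV_recordDead() adds 10 at censuses[2] and subtracts
        // 10 at censuses[5]; the running sum below then reports those 10
        // words as void in censuses 2, 3 and 4.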

        void_total = 0;
        drag_total = 0;
        for (t = 1; t < era; t++) { // note: start at 1, not 0
            void_total += censuses[t].void_total;
            drag_total += censuses[t].drag_total;
            censuses[t].void_total = void_total;
            censuses[t].drag_total = drag_total;

            ASSERT( censuses[t].void_total <= censuses[t].not_used );
            // should be true because: void_total is the count of
            // live words that are void at this census, which *must*
            // be less than the number of live words that have not
            // been used yet.

            ASSERT( censuses[t].drag_total <= censuses[t].used );
            // similar reasoning as above.
        }

        return;
    }

    // otherwise... we're doing a heap profile that is restricted to
    // some combination of lag, drag, void or use.  We've kept all the
    // census info for all censuses so far, but we still need to
    // aggregate the counters forwards.

    arena = newArena();
    acc = allocHashTable();
    ctrs = NULL;

    for (t = 1; t < era; t++) {

        // first look through all the counters we're aggregating
        for (c = ctrs; c != NULL; c = c->next) {
            // if one of the totals is non-zero, then this closure
            // type must be present in the heap at this census time...
            d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);

            if (d == NULL) {
                // if this closure identity isn't present in the
                // census for this time period, then our running
                // totals *must* be zero.
                ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);

                // debugCCS(c->identity);
                // debugBelch(" census=%d void_total=%d drag_total=%d\n",
                //            t, c->c.ldv.void_total, c->c.ldv.drag_total);
            } else {
                d->c.ldv.void_total += c->c.ldv.void_total;
                d->c.ldv.drag_total += c->c.ldv.drag_total;
                c->c.ldv.void_total =  d->c.ldv.void_total;
                c->c.ldv.drag_total =  d->c.ldv.drag_total;

                ASSERT( c->c.ldv.void_total >= 0 );
                ASSERT( c->c.ldv.drag_total >= 0 );
            }
        }

        // now look through the counters in this census to find new ones
        for (c = censuses[t].ctrs; c != NULL; c = c->next) {
            d = lookupHashTable(acc, (StgWord)c->identity);
            if (d == NULL) {
                d = arenaAlloc( arena, sizeof(counter) );
                initLDVCtr(d);
                insertHashTable( acc, (StgWord)c->identity, d );
                d->identity = c->identity;
                d->next = ctrs;
                ctrs = d;
                d->c.ldv.void_total = c->c.ldv.void_total;
                d->c.ldv.drag_total = c->c.ldv.drag_total;
            }
            ASSERT( c->c.ldv.void_total >= 0 );
            ASSERT( c->c.ldv.drag_total >= 0 );
        }
    }

    freeHashTable(acc, NULL);
    arenaFree(arena);
}
#endif

/* -----------------------------------------------------------------------------
 * Print out the results of a heap census.
 * -------------------------------------------------------------------------- */
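// A census is bracketed by BEGIN_SAMPLE/END_SAMPLE lines; in between,
// one "<band>\t<bytes>" line is emitted for each closure identity with
// a non-zero count.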
static void
dumpCensus( Census *census )
{
    counter *ctr;
    int count;

    printSample(rtsTrue, census->time);

#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        fprintf(hp_file, "VOID\t%lu\n",
                (unsigned long)(census->void_total) * sizeof(W_));
        fprintf(hp_file, "LAG\t%lu\n",
                (unsigned long)(census->not_used - census->void_total) * sizeof(W_));
        fprintf(hp_file, "USE\t%lu\n",
                (unsigned long)(census->used - census->drag_total) * sizeof(W_));
        fprintf(hp_file, "INHERENT_USE\t%lu\n",
                (unsigned long)(census->prim) * sizeof(W_));
        fprintf(hp_file, "DRAG\t%lu\n",
                (unsigned long)(census->drag_total) * sizeof(W_));
        printSample(rtsFalse, census->time);
        return;
    }
#endif

    for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {

#ifdef PROFILING
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            count = 0;
            if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
            if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.drag_total;
            if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.void_total;
            if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
        } else
#endif
        {
            count = ctr->c.resid;
        }

        ASSERT( count >= 0 );

        if (count == 0) continue;

#if !defined(PROFILING)
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CLOSURE_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        }
#endif

#ifdef PROFILING
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CCS:
            fprint_ccs(hp_file, (CostCentreStack *)ctr->identity,
                       RtsFlags.ProfFlags.ccsLength);
            break;
        case HEAP_BY_MOD:
        case HEAP_BY_DESCR:
        case HEAP_BY_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        case HEAP_BY_RETAINER:
        {
            RetainerSet *rs = (RetainerSet *)ctr->identity;

            // it might be the distinguished retainer set rs_MANY:
            if (rs == &rs_MANY) {
                fprintf(hp_file, "MANY");
                break;
            }

            // Mark this retainer set by negating its id, because it
            // has appeared in at least one census.  We print the
            // values of all such retainer sets into the log file at
            // the end.  A retainer set may exist but not feature in
            // any censuses if it arose as the intermediate retainer
            // set for some closure during retainer set calculation.
            if (rs->id > 0)
                rs->id = -(rs->id);

            // report in the unit of bytes: * sizeof(StgWord)
            printRetainerSetShort(hp_file, rs);
            break;
        }
        default:
            barf("dumpCensus; doHeapProfile");
        }
#endif

        fprintf(hp_file, "\t%lu\n", (unsigned long)count * sizeof(W_));
    }

    printSample(rtsFalse, census->time);
}

/* -----------------------------------------------------------------------------
 * Code to perform a heap census.
 * -------------------------------------------------------------------------- */
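// Walk one chain of blocks (a bdescr list), decode each closure's size
// from its info table, and credit its space to the closure's identity
// in the current census.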
static void
heapCensusChain( Census *census, bdescr *bd )
{
    StgPtr p;
    StgInfoTable *info;
    void *identity;
    nat size;
    counter *ctr;
    nat real_size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        // HACK: ignore pinned blocks, because they contain gaps.
        // It's not clear exactly what we'd like to do here, since we
        // can't tell which objects in the block are actually alive.
        // Perhaps the whole block should be counted as SYSTEM memory.
        if (bd->flags & BF_PINNED) {
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case IND_PERM:
            case IND_OLDGEN:
            case IND_OLDGEN_PERM:
            case CAF_BLACKHOLE:
            case SE_CAF_BLACKHOLE:
            case SE_BLACKHOLE:
            case BLACKHOLE:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR:
            case WEAK:
            case STABLE_NAME:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
                size = ap_stack_sizeW((StgAP_STACK *)p);
                break;

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW(stgCast(StgArrWords*,p));
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
#ifdef PROFILING
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = tso_sizeW((StgTSO *)p);
                    break;
                } else {
                    // Skip this TSO and move on to the next object
                    p += tso_sizeW((StgTSO *)p);
                    continue;
                }
#else
                size = tso_sizeW((StgTSO *)p);
                break;
#endif

            case TREC_HEADER:
                prim = rtsTrue;
                size = sizeofW(StgTRecHeader);
                break;

            case TVAR_WATCH_QUEUE:
                prim = rtsTrue;
                size = sizeofW(StgTVarWatchQueue);
                break;

            case INVARIANT_CHECK_QUEUE:
                prim = rtsTrue;
                size = sizeofW(StgInvariantCheckQueue);
                break;

            case ATOMIC_INVARIANT:
                prim = rtsTrue;
                size = sizeofW(StgAtomicInvariant);
                break;

            case TVAR:
                prim = rtsTrue;
                size = sizeofW(StgTVar);
                break;

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("heapCensus, unknown object: %d", info->type);
            }

            identity = NULL;

#ifdef PROFILING
            // subtract the profiling overhead
            real_size = size - sizeofW(StgProfHeader);
#else
            real_size = size;
#endif

            if (closureSatisfiesConstraints((StgClosure*)p)) {
#ifdef PROFILING
                if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
                    if (prim)
                        census->prim += real_size;
                    else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                        census->not_used += real_size;
                    else
                        census->used += real_size;
                } else
#endif
                {
                    identity = closureIdentity((StgClosure *)p);

                    if (identity != NULL) {
                        ctr = lookupHashTable( census->hash, (StgWord)identity );
                        if (ctr != NULL) {
#ifdef PROFILING
                            if (RtsFlags.ProfFlags.bioSelector != NULL) {
                                if (prim)
                                    ctr->c.ldv.prim += real_size;
                                else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                                    ctr->c.ldv.not_used += real_size;
                                else
                                    ctr->c.ldv.used += real_size;
                            } else
#endif
                            {
                                ctr->c.resid += real_size;
                            }
                        } else {
                            ctr = arenaAlloc( census->arena, sizeof(counter) );
                            initLDVCtr(ctr);
                            insertHashTable( census->hash, (StgWord)identity, ctr );
                            ctr->identity = identity;
                            ctr->next = census->ctrs;
                            census->ctrs = ctr;

#ifdef PROFILING
                            if (RtsFlags.ProfFlags.bioSelector != NULL) {
                                if (prim)
                                    ctr->c.ldv.prim = real_size;
                                else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                                    ctr->c.ldv.not_used = real_size;
                                else
                                    ctr->c.ldv.used = real_size;
                            } else
#endif
                            {
                                ctr->c.resid = real_size;
                            }
                        }
                    }
                }
            }

            p += size;
        }
    }
}

void
heapCensus( void )
{
    nat g, s;
    Census *census;

    census = &censuses[era];
    census->time = mut_user_time();

    // calculate retainer sets if necessary
#ifdef PROFILING
    if (doingRetainerProfiling()) {
        retainerProfile();
    }
#endif

#ifdef PROFILING
    stat_startHeapCensus();
#endif

    // Traverse the heap, collecting the census info

    // First the small_alloc_list: we have to fix the free pointer at
    // the end by calling tidyAllocateLists() first.
    tidyAllocateLists();
    heapCensusChain( census, small_alloc_list );

    // Now traverse the heap in each generation/step.
    if (RtsFlags.GcFlags.generations == 1) {
        heapCensusChain( census, g0s0->blocks );
    } else {
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            for (s = 0; s < generations[g].n_steps; s++) {
                heapCensusChain( census, generations[g].steps[s].blocks );
                // Are we interested in large objects?  might be
                // confusing to include the stack in a heap profile.
                heapCensusChain( census, generations[g].steps[s].large_objects );
            }
        }
    }

    // dump out the census info
#ifdef PROFILING
    // We can't generate any info for LDV profiling until
    // the end of the run...
    if (!doingLDVProfiling())
        dumpCensus( census );
#else
    dumpCensus( census );
#endif

    // free our storage, unless we're keeping all the census info for
    // future restriction by biography.
#ifdef PROFILING
    if (RtsFlags.ProfFlags.bioSelector == NULL)
    {
        freeHashTable( census->hash, NULL/* don't free the elements */ );
        arenaFree( census->arena );
        census->hash = NULL;
        census->arena = NULL;
    }
#endif

    // we're into the next time period now
    nextEra();

#ifdef PROFILING
    stat_endHeapCensus();
#endif
}