another fix for -hb: we appear to be freeing the hash table and arena twice
[ghc.git] / rts / ProfHeap.c
/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2003
 *
 * Support for heap profiling
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "Profiling.h"
#include "ProfHeap.h"
#include "Stats.h"
#include "Hash.h"
#include "RetainerProfile.h"
#include "LdvProfile.h"
#include "Arena.h"
#include "Printer.h"

#include <string.h>
#include <stdlib.h>
#include <math.h>

/* -----------------------------------------------------------------------------
 * era stores the current time period.  It is the same as the
 * number of censuses that have been performed.
 *
 * RESTRICTION:
 *   era must fit in LDV_SHIFT (15 or 30) bits.
 * Invariants:
 *   era is initialized to 1 in initHeapProfiling().
 *
 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
 * When era reaches max_era, profiling stops, because a closure can
 * store only up to (max_era - 1) as its creation or last-use time.
 * -------------------------------------------------------------------------- */
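/* For example, with LDV_SHIFT == 30 (the 64-bit layout) max_era is
 * 2^30, so a run can take at most 2^30 - 1 censuses before the
 * creation/last-use fields of a closure's LDV word would overflow;
 * with LDV_SHIFT == 15 (32-bit) the limit is 2^15 - 1 = 32767
 * censuses.  (Illustrative arithmetic only; the authoritative
 * definitions of LDV_SHIFT and the word layout live with the other
 * LDV macros used below.)
 */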
unsigned int era;
static nat max_era;

/* -----------------------------------------------------------------------------
 * Counters
 *
 * For most heap profiles each closure identity gets a simple count
 * of live words in the heap at each census.  However, if we're
 * selecting by biography, then we have to keep the various
 * lag/drag/void counters for each identity.
 * -------------------------------------------------------------------------- */
typedef struct _counter {
    void *identity;
    union {
        nat resid;
        struct {
            int prim;       // total size of 'inherently used' closures
            int not_used;   // total size of 'never used' closures
            int used;       // total size of 'used at least once' closures
            int void_total; // current total size of 'destroyed without being used' closures
            int drag_total; // current total size of 'used at least once and waiting to die'
        } ldv;
    } c;
    struct _counter *next;
} counter;

STATIC_INLINE void
initLDVCtr( counter *ctr )
{
    ctr->c.ldv.prim = 0;
    ctr->c.ldv.not_used = 0;
    ctr->c.ldv.used = 0;
    ctr->c.ldv.void_total = 0;
    ctr->c.ldv.drag_total = 0;
}
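// Note (an observation, not load-bearing): since 'resid' shares
// storage with the 'ldv' struct, a counter initialized by
// initLDVCtr() also reads as c.resid == 0 on the usual layouts, so a
// fresh counter is valid for either kind of census.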

typedef struct {
    double      time;   // the time in MUT time when the census is made
    HashTable * hash;
    counter   * ctrs;
    Arena     * arena;

    // for LDV profiling, when just displaying by LDV
    int         prim;
    int         not_used;
    int         used;
    int         void_total;
    int         drag_total;
} Census;
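// Ownership of a census's hash table and arena (see freeEra() and the
// end of heapCensus() below): when RtsFlags.ProfFlags.bioSelector is
// NULL they are freed, and the fields NULLed out, as soon as the
// census has been dumped; when a biography selector is in effect they
// must survive until endHeapProfiling().  Freeing them in both places
// was the double free referred to in the commit message above.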

static Census *censuses = NULL;
static nat n_censuses = 0;

#ifdef PROFILING
static void aggregateCensusInfo( void );
#endif

static void dumpCensus( Census *census );

/* ----------------------------------------------------------------------------
   Closure Type Profiling
   ------------------------------------------------------------------------- */

#ifndef PROFILING
static char *type_names[] = {
    "INVALID_OBJECT",
    "CONSTR",
    "CONSTR_1_0",
    "CONSTR_0_1",
    "CONSTR_2_0",
    "CONSTR_1_1",
    "CONSTR_0_2",
    "CONSTR_STATIC",
    "CONSTR_NOCAF_STATIC",
    "FUN",
    "FUN_1_0",
    "FUN_0_1",
    "FUN_2_0",
    "FUN_1_1",
    "FUN_0_2",
    "FUN_STATIC",
    "THUNK",
    "THUNK_1_0",
    "THUNK_0_1",
    "THUNK_2_0",
    "THUNK_1_1",
    "THUNK_0_2",
    "THUNK_STATIC",
    "THUNK_SELECTOR",
    "BCO",
    "AP",
    "PAP",
    "AP_STACK",
    "IND",
    "IND_OLDGEN",
    "IND_PERM",
    "IND_OLDGEN_PERM",
    "IND_STATIC",
    "RET_BCO",
    "RET_SMALL",
    "RET_BIG",
    "RET_DYN",
    "RET_FUN",
    "UPDATE_FRAME",
    "CATCH_FRAME",
    "STOP_FRAME",
    "CAF_BLACKHOLE",
    "BLACKHOLE",
    "SE_BLACKHOLE",
    "SE_CAF_BLACKHOLE",
    "MVAR",
    "ARR_WORDS",
    "MUT_ARR_PTRS_CLEAN",
    "MUT_ARR_PTRS_DIRTY",
    "MUT_ARR_PTRS_FROZEN0",
    "MUT_ARR_PTRS_FROZEN",
    "MUT_VAR_CLEAN",
    "MUT_VAR_DIRTY",
    "WEAK",
    "STABLE_NAME",
    "TSO",
    "BLOCKED_FETCH",
    "FETCH_ME",
    "FETCH_ME_BQ",
    "RBH",
    "EVACUATED",
    "REMOTE_REF",
    "TVAR_WATCH_QUEUE",
    "INVARIANT_CHECK_QUEUE",
    "ATOMIC_INVARIANT",
    "TVAR",
    "TREC_CHUNK",
    "TREC_HEADER",
    "ATOMICALLY_FRAME",
    "CATCH_RETRY_FRAME",
    "CATCH_STM_FRAME",
    "N_CLOSURE_TYPES"
};
#endif

/* ----------------------------------------------------------------------------
 * Find the "closure identity", which is a unique pointer representing
 * the band to which this closure's heap space is attributed in the
 * heap profile.
 * ------------------------------------------------------------------------- */
STATIC_INLINE void *
closureIdentity( StgClosure *p )
{
    switch (RtsFlags.ProfFlags.doHeapProfile) {

#ifdef PROFILING
    case HEAP_BY_CCS:
        return p->header.prof.ccs;
    case HEAP_BY_MOD:
        return p->header.prof.ccs->cc->module;
    case HEAP_BY_DESCR:
        return GET_PROF_DESC(get_itbl(p));
    case HEAP_BY_TYPE:
        return GET_PROF_TYPE(get_itbl(p));
    case HEAP_BY_RETAINER:
        // AFAIK, the only closures in the heap which might not have a
        // valid retainer set are DEAD_WEAK closures.
        if (isRetainerSetFieldValid(p))
            return retainerSetOf(p);
        else
            return NULL;

#else
    case HEAP_BY_CLOSURE_TYPE:
    {
        StgInfoTable *info;
        info = get_itbl(p);
        switch (info->type) {
        case CONSTR:
        case CONSTR_1_0:
        case CONSTR_0_1:
        case CONSTR_2_0:
        case CONSTR_1_1:
        case CONSTR_0_2:
        case CONSTR_STATIC:
        case CONSTR_NOCAF_STATIC:
            return GET_CON_DESC(itbl_to_con_itbl(info));
        default:
            return type_names[info->type];
        }
    }

#endif
    default:
        barf("closureIdentity");
    }
}

/* --------------------------------------------------------------------------
 * Profiling type predicates
 * ----------------------------------------------------------------------- */
#ifdef PROFILING
STATIC_INLINE rtsBool
doingLDVProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
            || RtsFlags.ProfFlags.bioSelector != NULL);
}

STATIC_INLINE rtsBool
doingRetainerProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
            || RtsFlags.ProfFlags.retainerSelector != NULL);
}
#endif /* PROFILING */

// Processes a closure 'c' being destroyed whose size is 'size'.
// Make sure that LDV_recordDead() is not invoked on 'inherently used'
// closures such as TSO; they should not be involved in computing
// dragNew or voidNew.
//
// Even though era is checked in both LdvCensusForDead() and
// LdvCensusKillAll(), we still need to make sure that era is > 0 because
// LDV_recordDead() may be called from elsewhere in the runtime system,
// e.g. when a thunk is replaced by an indirection object.
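// Worked example of the bookkeeping below (bioSelector == NULL case):
// a closure of S words created in era 3 that dies without ever being
// used in era 7 adds +S to censuses[3].void_total and -S to
// censuses[7].void_total.  aggregateCensusInfo() later turns these
// deltas into running totals, so censuses 3..6 each report S live
// void words and censuses from 7 onwards report none.  Drag is
// accounted the same way, except that its interval starts just after
// the closure's last use (t + 1).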

#ifdef PROFILING
void
LDV_recordDead( StgClosure *c, nat size )
{
    void *id;
    nat t;
    counter *ctr;

    if (era > 0 && closureSatisfiesConstraints(c)) {
        size -= sizeofW(StgProfHeader);
        ASSERT(LDVW(c) != 0);
        if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
            t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
            if (t < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t].void_total   += (int)size;
                    censuses[era].void_total -= (int)size;
                    ASSERT(censuses[t].void_total < censuses[t].not_used);
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.void_total += (int)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.void_total -= (int)size;
                }
            }
        } else {
            t = LDVW((c)) & LDV_LAST_MASK;
            if (t + 1 < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t+1].drag_total += (int)size;
                    censuses[era].drag_total -= (int)size;
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.drag_total += (int)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.drag_total -= (int)size;
                }
            }
        }
    }
}
#endif

/* --------------------------------------------------------------------------
 * Initialize censuses[era].
 * ----------------------------------------------------------------------- */

STATIC_INLINE void
initEra(Census *census)
{
    census->hash  = allocHashTable();
    census->ctrs  = NULL;
    census->arena = newArena();

    census->not_used   = 0;
    census->used       = 0;
    census->prim       = 0;
    census->void_total = 0;
    census->drag_total = 0;
}

STATIC_INLINE void
freeEra(Census *census)
{
    // When bioSelector == NULL, the hash table and arena are freed
    // (and the fields NULLed out) at the end of heapCensus(), so we
    // must not free them a second time here.
    if (RtsFlags.ProfFlags.bioSelector != NULL) {
        arenaFree(census->arena);
        freeHashTable(census->hash, NULL);
    }
}

/* --------------------------------------------------------------------------
 * Increases era by 1 and initializes censuses[era].
 * Reallocates censuses[] and increases its size if needed.
 * ----------------------------------------------------------------------- */

static void
nextEra( void )
{
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era++;

        if (era == max_era) {
            errorBelch("maximum number of censuses reached; use +RTS -i to reduce");
            stg_exit(EXIT_FAILURE);
        }

        if (era == n_censuses) {
            n_censuses *= 2;
            censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
                                       "nextEra");
        }
    }
#endif /* PROFILING */

    initEra( &censuses[era] );
}

/* ----------------------------------------------------------------------------
 * Heap profiling by info table
 * ------------------------------------------------------------------------- */

#if !defined(PROFILING)
FILE *hp_file;
static char *hp_filename;

void initProfiling1 (void)
{
}

void freeProfiling1 (void)
{
}

void initProfiling2 (void)
{
    if (RtsFlags.ProfFlags.doHeapProfile) {
        /* Initialise the log file name */
        hp_filename = stgMallocBytes(strlen(prog_name) + 6, "hpFileName");
        sprintf(hp_filename, "%s.hp", prog_name);

        /* open the log file */
        if ((hp_file = fopen(hp_filename, "w")) == NULL) {
            debugBelch("Can't open profiling report file %s\n",
                       hp_filename);
            RtsFlags.ProfFlags.doHeapProfile = 0;
            return;
        }
    }

    initHeapProfiling();
}

void endProfiling( void )
{
    endHeapProfiling();
}
#endif /* !PROFILING */

static void
printSample(rtsBool beginSample, StgDouble sampleValue)
{
    StgDouble fractionalPart, integralPart;
    fractionalPart = modf(sampleValue, &integralPart);
    fprintf(hp_file, "%s %" FMT_Word64 ".%02" FMT_Word64 "\n",
            (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
            (StgWord64)integralPart, (StgWord64)(fractionalPart * 100));
}
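// For example, printSample(rtsTrue, 1.5) writes "BEGIN_SAMPLE 1.50"
// and printSample(rtsFalse, 1.5) writes "END_SAMPLE 1.50"; tools such
// as hp2ps rely on these markers to delimit each census in the .hp
// file.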

/* --------------------------------------------------------------------------
 * Initialize the heap profiler
 * ----------------------------------------------------------------------- */
nat
initHeapProfiling(void)
{
    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return 0;
    }

#ifdef PROFILING
    if (doingLDVProfiling() && doingRetainerProfiling()) {
        errorBelch("cannot mix -hb and -hr");
        stg_exit(EXIT_FAILURE);
    }
#endif

    // We only count eras if we're doing LDV profiling.  Otherwise era
    // is fixed at zero.
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era = 1;
    } else
#endif
    {
        era = 0;
    }

    {   // max_era = 2^LDV_SHIFT
        nat p;
        max_era = 1;
        for (p = 0; p < LDV_SHIFT; p++)
            max_era *= 2;
    }

    n_censuses = 32;
    censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");

    initEra( &censuses[era] );

    /* initProfilingLogFile(); */
    fprintf(hp_file, "JOB \"%s", prog_name);

#ifdef PROFILING
    {
        int count;
        for (count = 1; count < prog_argc; count++)
            fprintf(hp_file, " %s", prog_argv[count]);
        fprintf(hp_file, " +RTS");
        for (count = 0; count < rts_argc; count++)
            fprintf(hp_file, " %s", rts_argv[count]);
    }
#endif /* PROFILING */

    fprintf(hp_file, "\"\n" );

    fprintf(hp_file, "DATE \"%s\"\n", time_str());

    fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
    fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");

    printSample(rtsTrue, 0);
    printSample(rtsFalse, 0);

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        initRetainerProfiling();
    }
#endif

    return 0;
}
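/* The preamble written above makes the .hp file start along these
 * lines (program name, arguments and date are illustrative):
 *
 *   JOB "myprog +RTS -hc"
 *   DATE "Fri Apr 14 16:32 2006"
 *   SAMPLE_UNIT "seconds"
 *   VALUE_UNIT "bytes"
 *   BEGIN_SAMPLE 0.00
 *   END_SAMPLE 0.00
 */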

void
endHeapProfiling(void)
{
    StgDouble seconds;

    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return;
    }

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        endRetainerProfiling();
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        LdvCensusKillAll();
        aggregateCensusInfo();
        for (t = 1; t < era; t++) {
            dumpCensus( &censuses[t] );
        }
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        for (t = 1; t <= era; t++) {
            freeEra( &censuses[t] );
        }
    } else {
        freeEra( &censuses[0] );
    }
#else
    freeEra( &censuses[0] );
#endif

    stgFree(censuses);

    seconds = mut_user_time();
    printSample(rtsTrue, seconds);
    printSample(rtsFalse, seconds);
    fclose(hp_file);
}

#ifdef PROFILING
static size_t
buf_append(char *p, const char *q, char *end)
{
    size_t m;

    for (m = 0; p < end; p++, q++, m++) {
        *p = *q;
        if (*q == '\0') { break; }
    }
    return m;
}

static void
fprint_ccs(FILE *fp, CostCentreStack *ccs, nat max_length)
{
    char buf[max_length+1], *p, *buf_end;

    // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
    if (ccs == CCS_MAIN) {
        fprintf(fp, "MAIN");
        return;
    }

    fprintf(fp, "(%ld)", ccs->ccsID);

    p = buf;
    buf_end = buf + max_length + 1;

    // Keep printing components of the stack until we run out of space
    // in the buffer.  If we run out of space, end with "...".
    for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {

        // CAF cost centres print as M.CAF, but we leave the module
        // name out of all the others to save space.
        if (!strcmp(ccs->cc->label,"CAF")) {
            p += buf_append(p, ccs->cc->module, buf_end);
            p += buf_append(p, ".CAF", buf_end);
        } else {
            p += buf_append(p, ccs->cc->label, buf_end);
            if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
                p += buf_append(p, "/", buf_end);
            }
        }

        if (p >= buf_end) {
            sprintf(buf+max_length-4, "...");
            break;
        }
    }
    fprintf(fp, "%s", buf);
}
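// Example: a stack with ccsID 42 whose frames are "foo" -> "bar" ->
// a CAF in module Main is rendered as "(42)foo/bar/Main.CAF"
// (illustrative names); if the rendering exceeds max_length
// characters it is truncated and terminated with "...".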
#endif /* PROFILING */

rtsBool
strMatchesSelector( char* str, char* sel )
{
    char* p;
    // debugBelch("str_matches_selector %s %s\n", str, sel);
    while (1) {
        // Compare str against wherever we've got to in sel.
        p = str;
        while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
            p++; sel++;
        }
        // Match if all of str was used and we have reached the end of
        // a sel fragment.
        if (*p == '\0' && (*sel == ',' || *sel == '\0'))
            return rtsTrue;

        // No match.  Advance sel to the start of the next element.
        while (*sel != ',' && *sel != '\0') sel++;
        if (*sel == ',') sel++;

        /* Run out of sel? */
        if (*sel == '\0') return rtsFalse;
    }
}
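// Examples:
//   strMatchesSelector("drag", "lag,drag,void")  ==>  rtsTrue
//   strMatchesSelector("use",  "lag,drag")       ==>  rtsFalse
// i.e. sel is a comma-separated list and str must equal one element
// exactly; substring matches do not count.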

/* -----------------------------------------------------------------------------
 * Figure out whether a closure should be counted in this census, by
 * testing against all the specified constraints.
 * -------------------------------------------------------------------------- */
rtsBool
closureSatisfiesConstraints( StgClosure* p )
{
#if !defined(PROFILING)
    (void)p; /* keep gcc -Wall happy */
    return rtsTrue;
#else
    rtsBool b;

    // The CCS has a selected field to indicate whether this closure is
    // deselected by not being mentioned in the module, CC, or CCS
    // selectors.
    if (!p->header.prof.ccs->selected) {
        return rtsFalse;
    }

    if (RtsFlags.ProfFlags.descrSelector) {
        b = strMatchesSelector( (GET_PROF_DESC(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.descrSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.typeSelector) {
        b = strMatchesSelector( (GET_PROF_TYPE(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.typeSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.retainerSelector) {
        RetainerSet *rs;
        nat i;
        // We must check that the retainer set is valid here.  One
        // reason it might not be valid is if this closure is a
        // newly deceased weak pointer (i.e. a DEAD_WEAK), since
        // these aren't reached by the retainer profiler's traversal.
        if (isRetainerSetFieldValid((StgClosure *)p)) {
            rs = retainerSetOf((StgClosure *)p);
            if (rs != NULL) {
                for (i = 0; i < rs->num; i++) {
                    b = strMatchesSelector( rs->element[i]->cc->label,
                                            RtsFlags.ProfFlags.retainerSelector );
                    if (b) return rtsTrue;
                }
            }
        }
        return rtsFalse;
    }
    return rtsTrue;
#endif /* PROFILING */
}
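// For example (flag spellings per the GHC RTS documentation; shown
// here as an illustration only): "+RTS -hc -hyMVar,TSO" produces a
// cost-centre-stack profile restricted to closures whose type
// description matches "MVar" or "TSO"; the selector strings are
// exactly the comma-separated lists consumed by strMatchesSelector()
// above.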

/* -----------------------------------------------------------------------------
 * Aggregate the heap census info for biographical profiling
 * -------------------------------------------------------------------------- */
#ifdef PROFILING
static void
aggregateCensusInfo( void )
{
    HashTable *acc;
    nat t;
    counter *c, *d, *ctrs;
    Arena *arena;

    if (!doingLDVProfiling()) return;

    // Aggregate the LDV counters when displaying by biography.
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        int void_total, drag_total;

        // Now we compute void_total and drag_total for each census.
        // After the program has finished, the void_total field of
        // each census contains the count of words that were *created*
        // in this era and were eventually void.  Conversely, if a
        // void closure was destroyed in this era, it will be
        // represented by a negative count of words in void_total.
        //
        // To get the count of live words that are void at each
        // census, just propagate the void_total count forwards:

        void_total = 0;
        drag_total = 0;
        for (t = 1; t < era; t++) { // note: start at 1, not 0
            void_total += censuses[t].void_total;
            drag_total += censuses[t].drag_total;
            censuses[t].void_total = void_total;
            censuses[t].drag_total = drag_total;

            ASSERT( censuses[t].void_total <= censuses[t].not_used );
            // should be true because: void_total is the count of
            // live words that are void at this census, which *must*
            // be less than the number of live words that have not
            // been used yet.

            ASSERT( censuses[t].drag_total <= censuses[t].used );
            // similar reasoning as above.
        }

        return;
    }

    // Otherwise... we're doing a heap profile that is restricted to
    // some combination of lag, drag, void or use.  We've kept all the
    // census info for all censuses so far, but we still need to
    // aggregate the counters forwards.

    arena = newArena();
    acc = allocHashTable();
    ctrs = NULL;

    for (t = 1; t < era; t++) {

        // first look through all the counters we're aggregating
        for (c = ctrs; c != NULL; c = c->next) {
            // if one of the totals is non-zero, then this closure
            // type must be present in the heap at this census time...
            d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);

            if (d == NULL) {
                // if this closure identity isn't present in the
                // census for this time period, then our running
                // totals *must* be zero.
                ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);

                // debugCCS(c->identity);
                // debugBelch(" census=%d void_total=%d drag_total=%d\n",
                //            t, c->c.ldv.void_total, c->c.ldv.drag_total);
            } else {
                d->c.ldv.void_total += c->c.ldv.void_total;
                d->c.ldv.drag_total += c->c.ldv.drag_total;
                c->c.ldv.void_total = d->c.ldv.void_total;
                c->c.ldv.drag_total = d->c.ldv.drag_total;

                ASSERT( c->c.ldv.void_total >= 0 );
                ASSERT( c->c.ldv.drag_total >= 0 );
            }
        }

        // now look through the counters in this census to find new ones
        for (c = censuses[t].ctrs; c != NULL; c = c->next) {
            d = lookupHashTable(acc, (StgWord)c->identity);
            if (d == NULL) {
                d = arenaAlloc( arena, sizeof(counter) );
                initLDVCtr(d);
                insertHashTable( acc, (StgWord)c->identity, d );
                d->identity = c->identity;
                d->next = ctrs;
                ctrs = d;
                d->c.ldv.void_total = c->c.ldv.void_total;
                d->c.ldv.drag_total = c->c.ldv.drag_total;
            }
            ASSERT( c->c.ldv.void_total >= 0 );
            ASSERT( c->c.ldv.drag_total >= 0 );
        }
    }

    freeHashTable(acc, NULL);
    arenaFree(arena);
}
#endif

/* -----------------------------------------------------------------------------
 * Print out the results of a heap census.
 * -------------------------------------------------------------------------- */
static void
dumpCensus( Census *census )
{
    counter *ctr;
    int count;

    printSample(rtsTrue, census->time);

#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        fprintf(hp_file, "VOID\t%lu\n",
                (unsigned long)(census->void_total) * sizeof(W_));
        fprintf(hp_file, "LAG\t%lu\n",
                (unsigned long)(census->not_used - census->void_total) * sizeof(W_));
        fprintf(hp_file, "USE\t%lu\n",
                (unsigned long)(census->used - census->drag_total) * sizeof(W_));
        fprintf(hp_file, "INHERENT_USE\t%lu\n",
                (unsigned long)(census->prim) * sizeof(W_));
        fprintf(hp_file, "DRAG\t%lu\n",
                (unsigned long)(census->drag_total) * sizeof(W_));
        printSample(rtsFalse, census->time);
        return;
    }
#endif

    for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {

#ifdef PROFILING
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            count = 0;
            if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
            if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.drag_total;
            if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.void_total;
            if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
        } else
#endif
        {
            count = ctr->c.resid;
        }

        ASSERT( count >= 0 );

        if (count == 0) continue;

#if !defined(PROFILING)
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CLOSURE_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        }
#endif

#ifdef PROFILING
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CCS:
            fprint_ccs(hp_file, (CostCentreStack *)ctr->identity,
                       RtsFlags.ProfFlags.ccsLength);
            break;
        case HEAP_BY_MOD:
        case HEAP_BY_DESCR:
        case HEAP_BY_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        case HEAP_BY_RETAINER:
        {
            RetainerSet *rs = (RetainerSet *)ctr->identity;

            // it might be the distinguished retainer set rs_MANY:
            if (rs == &rs_MANY) {
                fprintf(hp_file, "MANY");
                break;
            }

            // Mark this retainer set by negating its id, because it
            // has appeared in at least one census.  We print the
            // values of all such retainer sets into the log file at
            // the end.  A retainer set may exist but not feature in
            // any censuses if it arose as the intermediate retainer
            // set for some closure during retainer set calculation.
            if (rs->id > 0)
                rs->id = -(rs->id);

            // report in the unit of bytes: * sizeof(StgWord)
            printRetainerSetShort(hp_file, rs);
            break;
        }
        default:
            barf("dumpCensus; doHeapProfile");
        }
#endif

        fprintf(hp_file, "\t%lu\n", (unsigned long)count * sizeof(W_));
    }

    printSample(rtsFalse, census->time);
}
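/* For a -hc census, say, the dumped output therefore looks like:
 *
 *   BEGIN_SAMPLE 0.35
 *   (184)main/Main.CAF    704
 *   (211)foo/bar          2048
 *   END_SAMPLE 0.35
 *
 * (identities and counts illustrative: one line per band, giving the
 * identity, a tab, then its residency in bytes).
 */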

/* -----------------------------------------------------------------------------
 * Code to perform a heap census.
 * -------------------------------------------------------------------------- */
static void
heapCensusChain( Census *census, bdescr *bd )
{
    StgPtr p;
    StgInfoTable *info;
    void *identity;
    nat size;
    counter *ctr;
    nat real_size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        // HACK: ignore pinned blocks, because they contain gaps.
        // It's not clear exactly what we'd like to do here, since we
        // can't tell which objects in the block are actually alive.
        // Perhaps the whole block should be counted as SYSTEM memory.
        if (bd->flags & BF_PINNED) {
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case IND_PERM:
            case IND_OLDGEN:
            case IND_OLDGEN_PERM:
            case CAF_BLACKHOLE:
            case SE_CAF_BLACKHOLE:
            case SE_BLACKHOLE:
            case BLACKHOLE:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR:
            case WEAK:
            case STABLE_NAME:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
                size = ap_stack_sizeW((StgAP_STACK *)p);
                break;

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW(stgCast(StgArrWords*,p));
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
#ifdef PROFILING
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = tso_sizeW((StgTSO *)p);
                    break;
                } else {
                    // Skip this TSO and move on to the next object
                    p += tso_sizeW((StgTSO *)p);
                    continue;
                }
#else
                size = tso_sizeW((StgTSO *)p);
                break;
#endif

            case TREC_HEADER:
                prim = rtsTrue;
                size = sizeofW(StgTRecHeader);
                break;

            case TVAR_WATCH_QUEUE:
                prim = rtsTrue;
                size = sizeofW(StgTVarWatchQueue);
                break;

            case INVARIANT_CHECK_QUEUE:
                prim = rtsTrue;
                size = sizeofW(StgInvariantCheckQueue);
                break;

            case ATOMIC_INVARIANT:
                prim = rtsTrue;
                size = sizeofW(StgAtomicInvariant);
                break;

            case TVAR:
                prim = rtsTrue;
                size = sizeofW(StgTVar);
                break;

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("heapCensus, unknown object: %d", info->type);
            }

            identity = NULL;

#ifdef PROFILING
            // subtract the profiling overhead
            real_size = size - sizeofW(StgProfHeader);
#else
            real_size = size;
#endif

            if (closureSatisfiesConstraints((StgClosure*)p)) {
#ifdef PROFILING
                if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
                    if (prim)
                        census->prim += real_size;
                    else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                        census->not_used += real_size;
                    else
                        census->used += real_size;
                } else
#endif
                {
                    identity = closureIdentity((StgClosure *)p);

                    if (identity != NULL) {
                        ctr = lookupHashTable( census->hash, (StgWord)identity );
                        if (ctr != NULL) {
#ifdef PROFILING
                            if (RtsFlags.ProfFlags.bioSelector != NULL) {
                                if (prim)
                                    ctr->c.ldv.prim += real_size;
                                else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                                    ctr->c.ldv.not_used += real_size;
                                else
                                    ctr->c.ldv.used += real_size;
                            } else
#endif
                            {
                                ctr->c.resid += real_size;
                            }
                        } else {
                            ctr = arenaAlloc( census->arena, sizeof(counter) );
                            initLDVCtr(ctr);
                            insertHashTable( census->hash, (StgWord)identity, ctr );
                            ctr->identity = identity;
                            ctr->next = census->ctrs;
                            census->ctrs = ctr;

#ifdef PROFILING
                            if (RtsFlags.ProfFlags.bioSelector != NULL) {
                                if (prim)
                                    ctr->c.ldv.prim = real_size;
                                else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                                    ctr->c.ldv.not_used = real_size;
                                else
                                    ctr->c.ldv.used = real_size;
                            } else
#endif
                            {
                                ctr->c.resid = real_size;
                            }
                        }
                    }
                }
            }

            p += size;
        }
    }
}

void
heapCensus( void )
{
    nat g, s;
    Census *census;

    census = &censuses[era];
    census->time = mut_user_time();

    // calculate retainer sets if necessary
#ifdef PROFILING
    if (doingRetainerProfiling()) {
        retainerProfile();
    }
#endif

#ifdef PROFILING
    stat_startHeapCensus();
#endif

    // Traverse the heap, collecting the census info.

    // First the small_alloc_list: we have to fix the free pointer at
    // the end by calling tidyAllocateLists() first.
    tidyAllocateLists();
    heapCensusChain( census, small_alloc_list );

    // Now traverse the heap in each generation/step.
    if (RtsFlags.GcFlags.generations == 1) {
        heapCensusChain( census, g0s0->blocks );
    } else {
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            for (s = 0; s < generations[g].n_steps; s++) {
                heapCensusChain( census, generations[g].steps[s].blocks );
                // Are we interested in large objects?  might be
                // confusing to include the stack in a heap profile.
                heapCensusChain( census, generations[g].steps[s].large_objects );
            }
        }
    }

    // dump out the census info
#ifdef PROFILING
    // We can't generate any info for LDV profiling until
    // the end of the run...
    if (!doingLDVProfiling())
        dumpCensus( census );
#else
    dumpCensus( census );
#endif

    // Free our storage, unless we're keeping all the census info for
    // future restriction by biography.
#ifdef PROFILING
    if (RtsFlags.ProfFlags.bioSelector == NULL)
    {
        freeHashTable( census->hash, NULL/* don't free the elements */ );
        arenaFree( census->arena );
        census->hash = NULL;
        census->arena = NULL;
    }
#endif

    // we're into the next time period now
    nextEra();

#ifdef PROFILING
    stat_endHeapCensus();
#endif
}