/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2003
 *
 * Support for heap profiling
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "RtsUtils.h"
#include "Profiling.h"
#include "ProfHeap.h"
#include "Stats.h"
#include "Hash.h"
#include "RetainerProfile.h"
#include "LdvProfile.h"
#include "Arena.h"
#include "Printer.h"
#include "sm/GCThread.h"

#include <string.h>

/* -----------------------------------------------------------------------------
 * era stores the current time period.  It is the same as the
 * number of censuses that have been performed.
 *
 * RESTRICTION:
 *   era must be no longer than LDV_SHIFT (15 or 30) bits.
 * Invariants:
 *   era is initialized to 1 in initHeapProfiling().
 *
 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
 * When era reaches max_era, the profiling stops because a closure can
 * store only up to (max_era - 1) as its creation or last use time.
 * -------------------------------------------------------------------------- */
unsigned int era;
static nat max_era;

/* -----------------------------------------------------------------------------
 * Counters
 *
 * For most heap profiles each closure identity gets a simple count
 * of live words in the heap at each census.  However, if we're
 * selecting by biography, then we have to keep the various
 * lag/drag/void counters for each identity.
 * -------------------------------------------------------------------------- */
typedef struct _counter {
    void *identity;
    union {
        nat resid;
        struct {
            long prim;       // total size of 'inherently used' closures
            long not_used;   // total size of 'never used' closures
            long used;       // total size of 'used at least once' closures
            long void_total; // current total size of 'destroyed without being used' closures
            long drag_total; // current total size of 'used at least once and waiting to die'
        } ldv;
    } c;
    struct _counter *next;
} counter;

STATIC_INLINE void
initLDVCtr( counter *ctr )
{
    ctr->c.ldv.prim = 0;
    ctr->c.ldv.not_used = 0;
    ctr->c.ldv.used = 0;
    ctr->c.ldv.void_total = 0;
    ctr->c.ldv.drag_total = 0;
}

typedef struct {
    double      time;  // the time in MUT time when the census is made
    HashTable * hash;
    counter   * ctrs;
    Arena     * arena;

    // for LDV profiling, when just displaying by LDV
    long prim;
    long not_used;
    long used;
    long void_total;
    long drag_total;
} Census;

static Census *censuses = NULL;
static nat n_censuses = 0;

#ifdef PROFILING
static void aggregateCensusInfo( void );
#endif

static void dumpCensus( Census *census );

static rtsBool closureSatisfiesConstraints( StgClosure* p );

/* ----------------------------------------------------------------------------
 * Find the "closure identity", which is a unique pointer representing
 * the band to which this closure's heap space is attributed in the
 * heap profile.
 * ------------------------------------------------------------------------- */
static void *
closureIdentity( StgClosure *p )
{
    switch (RtsFlags.ProfFlags.doHeapProfile) {

#ifdef PROFILING
    case HEAP_BY_CCS:
        return p->header.prof.ccs;
    case HEAP_BY_MOD:
        return p->header.prof.ccs->cc->module;
    case HEAP_BY_DESCR:
        return GET_PROF_DESC(get_itbl(p));
    case HEAP_BY_TYPE:
        return GET_PROF_TYPE(get_itbl(p));
    case HEAP_BY_RETAINER:
        // AFAIK, the only closures in the heap which might not have a
        // valid retainer set are DEAD_WEAK closures.
        if (isRetainerSetFieldValid(p))
            return retainerSetOf(p);
        else
            return NULL;

#else
    case HEAP_BY_CLOSURE_TYPE:
    {
        StgInfoTable *info;
        info = get_itbl(p);
        switch (info->type) {
        case CONSTR:
        case CONSTR_1_0:
        case CONSTR_0_1:
        case CONSTR_2_0:
        case CONSTR_1_1:
        case CONSTR_0_2:
        case CONSTR_STATIC:
        case CONSTR_NOCAF_STATIC:
            return GET_CON_DESC(itbl_to_con_itbl(info));
        default:
            return closure_type_names[info->type];
        }
    }

#endif
    default:
        barf("closureIdentity");
    }
}

/* --------------------------------------------------------------------------
 * Profiling type predicates
 * ----------------------------------------------------------------------- */
#ifdef PROFILING
STATIC_INLINE rtsBool
doingLDVProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
            || RtsFlags.ProfFlags.bioSelector != NULL);
}

STATIC_INLINE rtsBool
doingRetainerProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
            || RtsFlags.ProfFlags.retainerSelector != NULL);
}
#endif /* PROFILING */

// Processes a closure 'c' being destroyed whose size is 'size'.
// Make sure that LDV_recordDead() is not invoked on 'inherently used' closures
// such as TSO; they should not be involved in computing dragNew or voidNew.
//
// Even though era is checked in both LdvCensusForDead() and
// LdvCensusKillAll(), we still need to make sure that era is > 0 because
// LDV_recordDead() may be called from elsewhere in the runtime system.  E.g.,
// when a thunk is replaced by an indirection object.
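//
// (Bookkeeping note: void_total and drag_total are maintained as deltas
// while the program runs -- when a closure dies we credit its size to the
// era in which it was created (or last used, in the drag case) and debit
// it from the current era.  aggregateCensusInfo() propagates these deltas
// forwards at the end of the run to recover the live totals at each
// census.)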

#ifdef PROFILING
void
LDV_recordDead( StgClosure *c, nat size )
{
    void *id;
    nat t;
    counter *ctr;

    if (era > 0 && closureSatisfiesConstraints(c)) {
        size -= sizeofW(StgProfHeader);
        ASSERT(LDVW(c) != 0);
        if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
            t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
            if (t < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t].void_total   += (long)size;
                    censuses[era].void_total -= (long)size;
                    ASSERT(censuses[t].void_total < censuses[t].not_used);
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.void_total += (long)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.void_total -= (long)size;
                }
            }
        } else {
            t = LDVW((c)) & LDV_LAST_MASK;
            if (t + 1 < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t+1].drag_total += size;
                    censuses[era].drag_total -= size;
                } else {
                    void *id;
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.drag_total += (long)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.drag_total -= (long)size;
                }
            }
        }
    }
}
#endif

/* --------------------------------------------------------------------------
 * Initialize censuses[era].
 * ----------------------------------------------------------------------- */

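// Each census owns a hash table (mapping closure identity to counter) and
// an arena from which its counters are allocated, so freeEra() can discard
// a whole census in one go.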
STATIC_INLINE void
initEra(Census *census)
{
    census->hash  = allocHashTable();
    census->ctrs  = NULL;
    census->arena = newArena();

    census->not_used   = 0;
    census->used       = 0;
    census->prim       = 0;
    census->void_total = 0;
    census->drag_total = 0;
}

STATIC_INLINE void
freeEra(Census *census)
{
    arenaFree(census->arena);
    freeHashTable(census->hash, NULL);
}

/* --------------------------------------------------------------------------
 * Increases era by 1 and initializes censuses[era].
 * Reallocates censuses[] and doubles its size if needed.
 * ----------------------------------------------------------------------- */

static void
nextEra( void )
{
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era++;

        if (era == max_era) {
            errorBelch("maximum number of censuses reached; use +RTS -i to reduce");
            stg_exit(EXIT_FAILURE);
        }

        if (era == n_censuses) {
            n_censuses *= 2;
            censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
                                       "nextEra");
        }
    }
#endif /* PROFILING */

    initEra( &censuses[era] );
}

/* ----------------------------------------------------------------------------
 * Heap profiling by info table
 * ------------------------------------------------------------------------- */

#if !defined(PROFILING)
FILE *hp_file;
static char *hp_filename;

void initProfiling1 (void)
{
}

void freeProfiling (void)
{
}

void initProfiling2 (void)
{
    char *prog;

    prog = stgMallocBytes(strlen(prog_name) + 1, "initProfiling2");
    strcpy(prog, prog_name);
#ifdef mingw32_HOST_OS
    // on Windows, drop the .exe suffix if there is one
    {
        char *suff;
        suff = strrchr(prog,'.');
        if (suff != NULL && !strcmp(suff,".exe")) {
            *suff = '\0';
        }
    }
#endif

    if (RtsFlags.ProfFlags.doHeapProfile) {
        /* Initialise the log file name */
        hp_filename = stgMallocBytes(strlen(prog) + 6, "hpFileName");
        sprintf(hp_filename, "%s.hp", prog);

        /* open the log file */
        if ((hp_file = fopen(hp_filename, "w")) == NULL) {
            debugBelch("Can't open profiling report file %s\n",
                       hp_filename);
            RtsFlags.ProfFlags.doHeapProfile = 0;
            return;
        }
    }

    stgFree(prog);

    initHeapProfiling();
}

void endProfiling( void )
{
    endHeapProfiling();
}
#endif /* !PROFILING */

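// Emits one line of the .hp sample protocol, e.g. "BEGIN_SAMPLE 0.00" or
// "END_SAMPLE 12.34"; the band lines for a census go between the two.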
static void
printSample(rtsBool beginSample, StgDouble sampleValue)
{
    StgDouble fractionalPart, integralPart;
    fractionalPart = modf(sampleValue, &integralPart);
    fprintf(hp_file, "%s %" FMT_Word64 ".%02" FMT_Word64 "\n",
            (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
            (StgWord64)integralPart, (StgWord64)(fractionalPart * 100));
    if (!beginSample) {
        fflush(hp_file);
    }
}

/* --------------------------------------------------------------------------
 * Initialize the heap profiler
 * ----------------------------------------------------------------------- */
nat
initHeapProfiling(void)
{
    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return 0;
    }

#ifdef PROFILING
    if (doingLDVProfiling() && doingRetainerProfiling()) {
        errorBelch("cannot mix -hb and -hr");
        stg_exit(EXIT_FAILURE);
    }
#endif

    // We only count eras if we're doing LDV profiling.  Otherwise era
    // is fixed at zero.
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era = 1;
    } else
#endif
    {
        era = 0;
    }

    // max_era = 2^LDV_SHIFT
    max_era = 1 << LDV_SHIFT;

    n_censuses = 32;
    censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");

    initEra( &censuses[era] );

    /* initProfilingLogFile(); */
    fprintf(hp_file, "JOB \"%s", prog_name);

#ifdef PROFILING
    {
        int count;
        for (count = 1; count < prog_argc; count++)
            fprintf(hp_file, " %s", prog_argv[count]);
        fprintf(hp_file, " +RTS");
        for (count = 0; count < rts_argc; count++)
            fprintf(hp_file, " %s", rts_argv[count]);
    }
#endif /* PROFILING */

    fprintf(hp_file, "\"\n" );

    fprintf(hp_file, "DATE \"%s\"\n", time_str());

    fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
    fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");

    printSample(rtsTrue, 0);
    printSample(rtsFalse, 0);

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        initRetainerProfiling();
    }
#endif

    return 0;
}

void
endHeapProfiling(void)
{
    StgDouble seconds;

    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return;
    }

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        endRetainerProfiling();
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        LdvCensusKillAll();
        aggregateCensusInfo();
        for (t = 1; t < era; t++) {
            dumpCensus( &censuses[t] );
        }
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            for (t = 1; t <= era; t++) {
                freeEra( &censuses[t] );
            }
        } else {
            freeEra( &censuses[era] );
        }
    } else {
        freeEra( &censuses[0] );
    }
#else
    freeEra( &censuses[0] );
#endif

    stgFree(censuses);

    seconds = mut_user_time();
    printSample(rtsTrue, seconds);
    printSample(rtsFalse, seconds);
    fclose(hp_file);
}


#ifdef PROFILING
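// Copies the string q (including its terminating NUL) into the buffer at p,
// stopping at 'end'.  Returns the number of characters written excluding
// the NUL, so successive calls overwrite the previous terminator and build
// up a single string.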
static size_t
buf_append(char *p, const char *q, char *end)
{
    int m;

    for (m = 0; p < end; p++, q++, m++) {
        *p = *q;
        if (*q == '\0') { break; }
    }
    return m;
}

static void
fprint_ccs(FILE *fp, CostCentreStack *ccs, nat max_length)
{
    char buf[max_length+1], *p, *buf_end;

    // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
    if (ccs == CCS_MAIN) {
        fprintf(fp, "MAIN");
        return;
    }

    fprintf(fp, "(%ld)", ccs->ccsID);

    p = buf;
    buf_end = buf + max_length + 1;

    // Keep printing components of the stack until we run out of space
    // in the buffer.  If we run out of space, end with "...".
    for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {

        // CAF cost centres print as M.CAF, but we leave the module
        // name out of all the others to save space.
        if (!strcmp(ccs->cc->label,"CAF")) {
            p += buf_append(p, ccs->cc->module, buf_end);
            p += buf_append(p, ".CAF", buf_end);
        } else {
            p += buf_append(p, ccs->cc->label, buf_end);
            if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
                p += buf_append(p, "/", buf_end);
            }
        }

        if (p >= buf_end) {
            sprintf(buf+max_length-4, "...");
            break;
        }
    }
    fprintf(fp, "%s", buf);
}

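// Returns rtsTrue iff 'str' is exactly equal to one of the comma-separated
// elements of the selector string 'sel'.  For example:
//   strMatchesSelector("drag", "lag,drag,void")  ==  rtsTrue
//   strMatchesSelector("dra",  "lag,drag,void")  ==  rtsFalse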
rtsBool
strMatchesSelector( char* str, char* sel )
{
    char* p;
    // debugBelch("str_matches_selector %s %s\n", str, sel);
    while (1) {
        // Compare str against wherever we've got to in sel.
        p = str;
        while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
            p++; sel++;
        }
        // Match if all of str used and have reached the end of a sel fragment.
        if (*p == '\0' && (*sel == ',' || *sel == '\0'))
            return rtsTrue;

        // No match.  Advance sel to the start of the next elem.
        while (*sel != ',' && *sel != '\0') sel++;
        if (*sel == ',') sel++;

        /* Run out of sel ?? */
        if (*sel == '\0') return rtsFalse;
    }
}

#endif /* PROFILING */

/* -----------------------------------------------------------------------------
 * Figure out whether a closure should be counted in this census, by
 * testing against all the specified constraints.
 * -------------------------------------------------------------------------- */
static rtsBool
closureSatisfiesConstraints( StgClosure* p )
{
#if !defined(PROFILING)
    (void)p;   /* keep gcc -Wall happy */
    return rtsTrue;
#else
    rtsBool b;

    // The CCS has a selected field to indicate whether this closure is
    // deselected by not being mentioned in the module, CC, or CCS
    // selectors.
    if (!p->header.prof.ccs->selected) {
        return rtsFalse;
    }

    if (RtsFlags.ProfFlags.descrSelector) {
        b = strMatchesSelector( (GET_PROF_DESC(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.descrSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.typeSelector) {
        b = strMatchesSelector( (GET_PROF_TYPE(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.typeSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.retainerSelector) {
        RetainerSet *rs;
        nat i;
        // We must check that the retainer set is valid here.  One
        // reason it might not be valid is if this closure is a newly
        // deceased weak pointer (i.e. a DEAD_WEAK), since
        // these aren't reached by the retainer profiler's traversal.
        if (isRetainerSetFieldValid((StgClosure *)p)) {
            rs = retainerSetOf((StgClosure *)p);
            if (rs != NULL) {
                for (i = 0; i < rs->num; i++) {
                    b = strMatchesSelector( rs->element[i]->cc->label,
                                            RtsFlags.ProfFlags.retainerSelector );
                    if (b) return rtsTrue;
                }
            }
        }
        return rtsFalse;
    }
    return rtsTrue;
#endif /* PROFILING */
}

/* -----------------------------------------------------------------------------
 * Aggregate the heap census info for biographical profiling
 * -------------------------------------------------------------------------- */
#ifdef PROFILING
static void
aggregateCensusInfo( void )
{
    HashTable *acc;
    nat t;
    counter *c, *d, *ctrs;
    Arena *arena;

    if (!doingLDVProfiling()) return;

    // Aggregate the LDV counters when displaying by biography.
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        long void_total, drag_total;

        // Now we compute void_total and drag_total for each census.
        // After the program has finished, the void_total field of
        // each census contains the count of words that were *created*
        // in this era and were eventually void.  Conversely, if a
        // void closure was destroyed in this era, it will be
        // represented by a negative count of words in void_total.
        //
        // To get the count of live words that are void at each
        // census, just propagate the void_total count forwards:

        void_total = 0;
        drag_total = 0;
        for (t = 1; t < era; t++) { // note: start at 1, not 0
            void_total += censuses[t].void_total;
            drag_total += censuses[t].drag_total;
            censuses[t].void_total = void_total;
            censuses[t].drag_total = drag_total;

            ASSERT( censuses[t].void_total <= censuses[t].not_used );
            // should be true because: void_total is the count of
            // live words that are void at this census, which *must*
            // be less than the number of live words that have not
            // been used yet.

            ASSERT( censuses[t].drag_total <= censuses[t].used );
            // similar reasoning as above.
        }

        return;
    }

    // Otherwise... we're doing a heap profile that is restricted to
    // some combination of lag, drag, void or use.  We've kept all the
    // census info for all censuses so far, but we still need to
    // aggregate the counters forwards.

    arena = newArena();
    acc = allocHashTable();
    ctrs = NULL;

    for (t = 1; t < era; t++) {

        // first look through all the counters we're aggregating
        for (c = ctrs; c != NULL; c = c->next) {
            // if one of the totals is non-zero, then this closure
            // type must be present in the heap at this census time...
            d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);

            if (d == NULL) {
                // if this closure identity isn't present in the
                // census for this time period, then our running
                // totals *must* be zero.
                ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);

                // debugCCS(c->identity);
                // debugBelch(" census=%d void_total=%d drag_total=%d\n",
                //            t, c->c.ldv.void_total, c->c.ldv.drag_total);
            } else {
                d->c.ldv.void_total += c->c.ldv.void_total;
                d->c.ldv.drag_total += c->c.ldv.drag_total;
                c->c.ldv.void_total =  d->c.ldv.void_total;
                c->c.ldv.drag_total =  d->c.ldv.drag_total;

                ASSERT( c->c.ldv.void_total >= 0 );
                ASSERT( c->c.ldv.drag_total >= 0 );
            }
        }

        // now look through the counters in this census to find new ones
        for (c = censuses[t].ctrs; c != NULL; c = c->next) {
            d = lookupHashTable(acc, (StgWord)c->identity);
            if (d == NULL) {
                d = arenaAlloc( arena, sizeof(counter) );
                initLDVCtr(d);
                insertHashTable( acc, (StgWord)c->identity, d );
                d->identity = c->identity;
                d->next = ctrs;
                ctrs = d;
                d->c.ldv.void_total = c->c.ldv.void_total;
                d->c.ldv.drag_total = c->c.ldv.drag_total;
            }
            ASSERT( c->c.ldv.void_total >= 0 );
            ASSERT( c->c.ldv.drag_total >= 0 );
        }
    }

    freeHashTable(acc, NULL);
    arenaFree(arena);
}
#endif

/* -----------------------------------------------------------------------------
 * Print out the results of a heap census.
 * -------------------------------------------------------------------------- */
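// A census appears in the .hp file as one sample: a BEGIN_SAMPLE line, one
// line per band with its size in bytes, then an END_SAMPLE line, e.g.:
//
//     BEGIN_SAMPLE 0.57
//     MAIN    1200
//     END_SAMPLE 0.57
//
// (The band label shown is illustrative; labels depend on the profile kind.)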
static void
dumpCensus( Census *census )
{
    counter *ctr;
    long count;

    printSample(rtsTrue, census->time);

#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        fprintf(hp_file, "VOID\t%lu\n",
                (unsigned long)(census->void_total) * sizeof(W_));
        fprintf(hp_file, "LAG\t%lu\n",
                (unsigned long)(census->not_used - census->void_total) * sizeof(W_));
        fprintf(hp_file, "USE\t%lu\n",
                (unsigned long)(census->used - census->drag_total) * sizeof(W_));
        fprintf(hp_file, "INHERENT_USE\t%lu\n",
                (unsigned long)(census->prim) * sizeof(W_));
        fprintf(hp_file, "DRAG\t%lu\n",
                (unsigned long)(census->drag_total) * sizeof(W_));
        printSample(rtsFalse, census->time);
        return;
    }
#endif

    for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {

#ifdef PROFILING
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            count = 0;
            if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
            if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.drag_total;
            if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.void_total;
            if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
        } else
#endif
        {
            count = ctr->c.resid;
        }

        ASSERT( count >= 0 );

        if (count == 0) continue;

#if !defined(PROFILING)
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CLOSURE_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        }
#endif

#ifdef PROFILING
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CCS:
            fprint_ccs(hp_file, (CostCentreStack *)ctr->identity,
                       RtsFlags.ProfFlags.ccsLength);
            break;
        case HEAP_BY_MOD:
        case HEAP_BY_DESCR:
        case HEAP_BY_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        case HEAP_BY_RETAINER:
        {
            RetainerSet *rs = (RetainerSet *)ctr->identity;

            // it might be the distinguished retainer set rs_MANY:
            if (rs == &rs_MANY) {
                fprintf(hp_file, "MANY");
                break;
            }

            // Mark this retainer set by negating its id, because it
            // has appeared in at least one census.  We print the
            // values of all such retainer sets into the log file at
            // the end.  A retainer set may exist but not feature in
            // any censuses if it arose as the intermediate retainer
            // set for some closure during retainer set calculation.
            if (rs->id > 0)
                rs->id = -(rs->id);

            // report in the unit of bytes: * sizeof(StgWord)
            printRetainerSetShort(hp_file, rs, RtsFlags.ProfFlags.ccsLength);
            break;
        }
        default:
            barf("dumpCensus; doHeapProfile");
        }
#endif

        fprintf(hp_file, "\t%" FMT_SizeT "\n", (W_)count * sizeof(W_));
    }

    printSample(rtsFalse, census->time);
}

static void heapProfObject(Census *census, StgClosure *p, nat size,
                           rtsBool prim
#ifndef PROFILING
                           STG_UNUSED
#endif
                           )
{
    void *identity;
    nat real_size;
    counter *ctr;

    identity = NULL;

#ifdef PROFILING
    // subtract the profiling overhead
    real_size = size - sizeofW(StgProfHeader);
#else
    real_size = size;
#endif

    if (closureSatisfiesConstraints((StgClosure*)p)) {
#ifdef PROFILING
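        // For LDV profiles each live closure is classified as 'inherently
        // used' (prim), never used yet (still in the CREATE state), or
        // used at least once -- the same three-way split kept per-identity
        // in the ldv counters below.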
        if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
            if (prim)
                census->prim += real_size;
            else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                census->not_used += real_size;
            else
                census->used += real_size;
        } else
#endif
        {
            identity = closureIdentity((StgClosure *)p);

            if (identity != NULL) {
                ctr = lookupHashTable( census->hash, (StgWord)identity );
                if (ctr != NULL) {
#ifdef PROFILING
                    if (RtsFlags.ProfFlags.bioSelector != NULL) {
                        if (prim)
                            ctr->c.ldv.prim += real_size;
                        else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                            ctr->c.ldv.not_used += real_size;
                        else
                            ctr->c.ldv.used += real_size;
                    } else
#endif
                    {
                        ctr->c.resid += real_size;
                    }
                } else {
                    ctr = arenaAlloc( census->arena, sizeof(counter) );
                    initLDVCtr(ctr);
                    insertHashTable( census->hash, (StgWord)identity, ctr );
                    ctr->identity = identity;
                    ctr->next = census->ctrs;
                    census->ctrs = ctr;

#ifdef PROFILING
                    if (RtsFlags.ProfFlags.bioSelector != NULL) {
                        if (prim)
                            ctr->c.ldv.prim = real_size;
                        else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                            ctr->c.ldv.not_used = real_size;
                        else
                            ctr->c.ldv.used = real_size;
                    } else
#endif
                    {
                        ctr->c.resid = real_size;
                    }
                }
            }
        }
    }
}

/* -----------------------------------------------------------------------------
 * Code to perform a heap census.
 * -------------------------------------------------------------------------- */
static void
heapCensusChain( Census *census, bdescr *bd )
{
    StgPtr p;
    StgInfoTable *info;
    nat size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        // HACK: pretend a pinned block is just one big ARR_WORDS
        // owned by CCS_PINNED.  These blocks can be full of holes due
        // to alignment constraints so we can't traverse the memory
        // and do a proper census.
        if (bd->flags & BF_PINNED) {
            StgClosure arr;
            SET_HDR(&arr, &stg_ARR_WORDS_info, CCS_PINNED);
            heapProfObject(census, &arr, bd->blocks * BLOCK_SIZE_W, rtsTrue);
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case IND_PERM:
            case BLACKHOLE:
            case BLOCKING_QUEUE:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case TVAR:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
                size = ap_stack_sizeW((StgAP_STACK *)p);
                break;

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW((StgArrWords*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case SMALL_MUT_ARR_PTRS_CLEAN:
            case SMALL_MUT_ARR_PTRS_DIRTY:
            case SMALL_MUT_ARR_PTRS_FROZEN:
            case SMALL_MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
#ifdef PROFILING
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = sizeofW(StgTSO);
                    break;
                } else {
                    // Skip this TSO and move on to the next object
                    p += sizeofW(StgTSO);
                    continue;
                }
#else
                size = sizeofW(StgTSO);
                break;
#endif

            case STACK:
                prim = rtsTrue;
#ifdef PROFILING
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = stack_sizeW((StgStack*)p);
                    break;
                } else {
                    // Skip this stack and move on to the next object
                    p += stack_sizeW((StgStack*)p);
                    continue;
                }
#else
                size = stack_sizeW((StgStack*)p);
                break;
#endif

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("heapCensus, unknown object: %d", info->type);
            }

            heapProfObject(census, (StgClosure*)p, size, prim);

            p += size;
        }
    }
}

void heapCensus (Time t)
{
    nat g, n;
    Census *census;
    gen_workspace *ws;

    census = &censuses[era];
    census->time = mut_user_time_until(t);
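    // census->time is mutator time, matching the SAMPLE_UNIT "seconds"
    // header written by initHeapProfiling().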

    // calculate retainer sets if necessary
#ifdef PROFILING
    if (doingRetainerProfiling()) {
        retainerProfile();
    }
#endif

#ifdef PROFILING
    stat_startHeapCensus();
#endif

    // Traverse the heap, collecting the census info
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        heapCensusChain( census, generations[g].blocks );
        // Are we interested in large objects?  It might be confusing to
        // include the stack in a heap profile.
        heapCensusChain( census, generations[g].large_objects );

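        // Also census the per-capability GC workspaces; right after a GC
        // their block lists (todo_bd, part_list, scavd_list) may still
        // hold live objects that haven't been attached to the
        // generation's block list.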
        for (n = 0; n < n_capabilities; n++) {
            ws = &gc_threads[n]->gens[g];
            heapCensusChain(census, ws->todo_bd);
            heapCensusChain(census, ws->part_list);
            heapCensusChain(census, ws->scavd_list);
        }
    }

    // dump out the census info
#ifdef PROFILING
    // We can't generate any info for LDV profiling until
    // the end of the run...
    if (!doingLDVProfiling())
        dumpCensus( census );
#else
    dumpCensus( census );
#endif


    // Free our storage, unless we're keeping all the census info for
    // future restriction by biography.
#ifdef PROFILING
    if (RtsFlags.ProfFlags.bioSelector == NULL)
    {
        freeEra(census);
        census->hash = NULL;
        census->arena = NULL;
    }
#endif

    // we're into the next time period now
    nextEra();

#ifdef PROFILING
    stat_endHeapCensus();
#endif
}