/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2003
 *
 * Support for heap profiling
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "RtsUtils.h"
#include "Profiling.h"
#include "ProfHeap.h"
#include "Stats.h"
#include "Hash.h"
#include "RetainerProfile.h"
#include "LdvProfile.h"
#include "Arena.h"
#include "Printer.h"
#include "sm/GCThread.h"

#include <string.h>

/* -----------------------------------------------------------------------------
 * era stores the current time period.  It is the same as the
 * number of censuses that have been performed.
 *
 * RESTRICTION:
 *   era must be no longer than LDV_SHIFT (15 or 30) bits.
 * Invariants:
 *   era is initialized to 1 in initHeapProfiling().
 *
 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
 * When era reaches max_era, the profiling stops because a closure can
 * store only up to (max_era - 1) as its creation or last use time.
 * -------------------------------------------------------------------------- */
unsigned int era;
static nat max_era;
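
/* A worked sketch of that restriction (illustrative; the actual mask
 * and shift definitions live elsewhere in the RTS headers): with
 * LDV_SHIFT = 15 we get max_era = 1 << 15 = 32768, and a closure's
 * creation era is recovered from its LDV word as
 *
 *     t = (LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT;  // 0 <= t < max_era
 *
 * so an era value >= max_era would no longer round-trip through the
 * LDV word, which is why nextEra() aborts when era reaches max_era.
 */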

/* -----------------------------------------------------------------------------
 * Counters
 *
 * For most heap profiles each closure identity gets a simple count
 * of live words in the heap at each census.  However, if we're
 * selecting by biography, then we have to keep the various
 * lag/drag/void counters for each identity.
 * -------------------------------------------------------------------------- */
typedef struct _counter {
    void *identity;
    union {
        nat resid;
        struct {
            int prim;       // total size of 'inherently used' closures
            int not_used;   // total size of 'never used' closures
            int used;       // total size of 'used at least once' closures
            int void_total; // current total size of 'destroyed without being used' closures
            int drag_total; // current total size of 'used at least once and waiting to die'
        } ldv;
    } c;
    struct _counter *next;
} counter;
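
/* Counters are allocated from a census's arena and chained through
 * 'next'; census->hash maps an identity to its counter.  The
 * lookup-or-create idiom used throughout this file (see
 * LDV_recordDead() and heapCensusChain() below) is:
 *
 *     ctr = lookupHashTable(census->hash, (StgWord)id);
 *     if (ctr == NULL) {
 *         ctr = arenaAlloc(census->arena, sizeof(counter));
 *         initLDVCtr(ctr);
 *         insertHashTable(census->hash, (StgWord)id, ctr);
 *         ctr->identity = id;
 *         ctr->next = census->ctrs;
 *         census->ctrs = ctr;
 *     }
 */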

STATIC_INLINE void
initLDVCtr( counter *ctr )
{
    ctr->c.ldv.prim = 0;
    ctr->c.ldv.not_used = 0;
    ctr->c.ldv.used = 0;
    ctr->c.ldv.void_total = 0;
    ctr->c.ldv.drag_total = 0;
}

typedef struct {
    double      time;   // the time in MUT time when the census is made
    HashTable * hash;
    counter   * ctrs;
    Arena     * arena;

    // for LDV profiling, when just displaying by LDV
    int       prim;
    int       not_used;
    int       used;
    int       void_total;
    int       drag_total;
} Census;

static Census *censuses = NULL;
static nat n_censuses = 0;

#ifdef PROFILING
static void aggregateCensusInfo( void );
#endif

static void dumpCensus( Census *census );

static rtsBool closureSatisfiesConstraints( StgClosure* p );

/* ----------------------------------------------------------------------------
 * Find the "closure identity", which is a unique pointer representing
 * the band to which this closure's heap space is attributed in the
 * heap profile.
 * ------------------------------------------------------------------------- */
static void *
closureIdentity( StgClosure *p )
{
    switch (RtsFlags.ProfFlags.doHeapProfile) {

#ifdef PROFILING
    case HEAP_BY_CCS:
        return p->header.prof.ccs;
    case HEAP_BY_MOD:
        return p->header.prof.ccs->cc->module;
    case HEAP_BY_DESCR:
        return GET_PROF_DESC(get_itbl(p));
    case HEAP_BY_TYPE:
        return GET_PROF_TYPE(get_itbl(p));
    case HEAP_BY_RETAINER:
        // AFAIK, the only closures in the heap which might not have a
        // valid retainer set are DEAD_WEAK closures.
        if (isRetainerSetFieldValid(p))
            return retainerSetOf(p);
        else
            return NULL;

#else
    case HEAP_BY_CLOSURE_TYPE:
    {
        StgInfoTable *info;
        info = get_itbl(p);
        switch (info->type) {
        case CONSTR:
        case CONSTR_1_0:
        case CONSTR_0_1:
        case CONSTR_2_0:
        case CONSTR_1_1:
        case CONSTR_0_2:
        case CONSTR_STATIC:
        case CONSTR_NOCAF_STATIC:
            return GET_CON_DESC(itbl_to_con_itbl(info));
        default:
            return closure_type_names[info->type];
        }
    }

#endif
    default:
        barf("closureIdentity");
    }
}
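
/* These modes correspond to the usual RTS heap-profiling flags: -hc
 * (cost-centre stack), -hm (module), -hd (closure description), -hy
 * (type) and -hr (retainer) in a PROFILING build, and -hT (closure
 * type) otherwise. */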

/* --------------------------------------------------------------------------
 * Profiling type predicates
 * ----------------------------------------------------------------------- */
#ifdef PROFILING
STATIC_INLINE rtsBool
doingLDVProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
            || RtsFlags.ProfFlags.bioSelector != NULL);
}

STATIC_INLINE rtsBool
doingRetainerProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
            || RtsFlags.ProfFlags.retainerSelector != NULL);
}
#endif /* PROFILING */

// Processes a closure 'c' being destroyed whose size is 'size'.
// Make sure that LDV_recordDead() is not invoked on 'inherently used' closures
// such as TSO; they should not be involved in computing dragNew or voidNew.
//
// Even though era is checked in both LdvCensusForDead() and
// LdvCensusKillAll(), we still need to make sure that era is > 0 because
// LDV_recordDead() may be called from elsewhere in the runtime system, e.g.
// when a thunk is replaced by an indirection object.
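//
// For example, a closure created in era t that dies unused in the
// current era contributes +size to censuses[t].void_total and -size
// to censuses[era].void_total; aggregateCensusInfo() later sums these
// deltas forwards to recover the live void count at each census.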

#ifdef PROFILING
void
LDV_recordDead( StgClosure *c, nat size )
{
    void *id;
    nat t;
    counter *ctr;

    if (era > 0 && closureSatisfiesConstraints(c)) {
        size -= sizeofW(StgProfHeader);
        ASSERT(LDVW(c) != 0);
        if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
            t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
            if (t < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t].void_total   += (int)size;
                    censuses[era].void_total -= (int)size;
                    ASSERT(censuses[t].void_total < censuses[t].not_used);
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.void_total += (int)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.void_total -= (int)size;
                }
            }
        } else {
            t = LDVW((c)) & LDV_LAST_MASK;
            if (t + 1 < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t+1].drag_total += size;
                    censuses[era].drag_total -= size;
                } else {
                    void *id;
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.drag_total += (int)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.drag_total -= (int)size;
                }
            }
        }
    }
}
#endif

/* --------------------------------------------------------------------------
 * Initialize censuses[era].
 * ----------------------------------------------------------------------- */

STATIC_INLINE void
initEra(Census *census)
{
    census->hash  = allocHashTable();
    census->ctrs  = NULL;
    census->arena = newArena();

    census->not_used   = 0;
    census->used       = 0;
    census->prim       = 0;
    census->void_total = 0;
    census->drag_total = 0;
}

STATIC_INLINE void
freeEra(Census *census)
{
    if (RtsFlags.ProfFlags.bioSelector != NULL)
        // when bioSelector==NULL, these are freed in heapCensus()
    {
        arenaFree(census->arena);
        freeHashTable(census->hash, NULL);
    }
}

/* --------------------------------------------------------------------------
 * Increase era by 1 and initialize censuses[era].
 * Reallocate censuses[] and increase its size if needed.
 * ----------------------------------------------------------------------- */

static void
nextEra( void )
{
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era++;

        if (era == max_era) {
            errorBelch("maximum number of censuses reached; use +RTS -i to reduce");
            stg_exit(EXIT_FAILURE);
        }

        if (era == n_censuses) {
            n_censuses *= 2;
            censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
                                       "nextEra");
        }
    }
#endif /* PROFILING */

    initEra( &censuses[era] );
}

/* ----------------------------------------------------------------------------
 * Heap profiling by info table
 * ------------------------------------------------------------------------- */

#if !defined(PROFILING)
FILE *hp_file;
static char *hp_filename;

void initProfiling1 (void)
{
}

void freeProfiling (void)
{
}

void initProfiling2 (void)
{
    char *prog;

    prog = stgMallocBytes(strlen(prog_name) + 1, "initProfiling2");
    strcpy(prog, prog_name);
#ifdef mingw32_HOST_OS
    // on Windows, drop the .exe suffix if there is one
    {
        char *suff;
        suff = strrchr(prog,'.');
        if (suff != NULL && !strcmp(suff,".exe")) {
            *suff = '\0';
        }
    }
#endif

    if (RtsFlags.ProfFlags.doHeapProfile) {
        /* Initialise the log file name */
        hp_filename = stgMallocBytes(strlen(prog) + 6, "hpFileName");
        sprintf(hp_filename, "%s.hp", prog);

        /* open the log file */
        if ((hp_file = fopen(hp_filename, "w")) == NULL) {
            debugBelch("Can't open profiling report file %s\n",
                       hp_filename);
            RtsFlags.ProfFlags.doHeapProfile = 0;
            return;
        }
    }

    stgFree(prog);

    initHeapProfiling();
}

void endProfiling( void )
{
    endHeapProfiling();
}
#endif /* !PROFILING */

static void
printSample(rtsBool beginSample, StgDouble sampleValue)
{
    StgDouble fractionalPart, integralPart;
    fractionalPart = modf(sampleValue, &integralPart);
    fprintf(hp_file, "%s %" FMT_Word64 ".%02" FMT_Word64 "\n",
            (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
            (StgWord64)integralPart, (StgWord64)(fractionalPart * 100));
}
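
// e.g. printSample(rtsTrue, 1.5) emits "BEGIN_SAMPLE 1.50" and
// printSample(rtsFalse, 0) emits "END_SAMPLE 0.00"; each census in
// the .hp file is bracketed by such a pair.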

/* --------------------------------------------------------------------------
 * Initialize the heap profiler
 * ----------------------------------------------------------------------- */
nat
initHeapProfiling(void)
{
    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return 0;
    }

#ifdef PROFILING
    if (doingLDVProfiling() && doingRetainerProfiling()) {
        errorBelch("cannot mix -hb and -hr");
        stg_exit(EXIT_FAILURE);
    }
#endif

    // we only count eras if we're doing LDV profiling.  Otherwise era
    // is fixed at zero.
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era = 1;
    } else
#endif
    {
        era = 0;
    }

    // max_era = 2^LDV_SHIFT
    max_era = 1 << LDV_SHIFT;

    n_censuses = 32;
    censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");

    initEra( &censuses[era] );

    /* initProfilingLogFile(); */
    fprintf(hp_file, "JOB \"%s", prog_name);

#ifdef PROFILING
    {
        int count;
        for (count = 1; count < prog_argc; count++)
            fprintf(hp_file, " %s", prog_argv[count]);
        fprintf(hp_file, " +RTS");
        for (count = 0; count < rts_argc; count++)
            fprintf(hp_file, " %s", rts_argv[count]);
    }
#endif /* PROFILING */

    fprintf(hp_file, "\"\n" );

    fprintf(hp_file, "DATE \"%s\"\n", time_str());

    fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
    fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");

    printSample(rtsTrue, 0);
    printSample(rtsFalse, 0);

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        initRetainerProfiling();
    }
#endif

    return 0;
}

void
endHeapProfiling(void)
{
    StgDouble seconds;

    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return;
    }

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        endRetainerProfiling();
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        LdvCensusKillAll();
        aggregateCensusInfo();
        for (t = 1; t < era; t++) {
            dumpCensus( &censuses[t] );
        }
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        for (t = 1; t <= era; t++) {
            freeEra( &censuses[t] );
        }
    } else {
        freeEra( &censuses[0] );
    }
#else
    freeEra( &censuses[0] );
#endif

    stgFree(censuses);

    seconds = mut_user_time();
    printSample(rtsTrue, seconds);
    printSample(rtsFalse, seconds);
    fclose(hp_file);
}

#ifdef PROFILING
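// Copy the string 'q' into the buffer at 'p', stopping at 'end';
// returns the number of characters copied, not counting the
// terminating NUL.  The caller's 'p += buf_append(...)' therefore
// leaves 'p' pointing at the NUL, ready to be overwritten by the
// next component.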
static size_t
buf_append(char *p, const char *q, char *end)
{
    int m;

    for (m = 0; p < end; p++, q++, m++) {
        *p = *q;
        if (*q == '\0') { break; }
    }
    return m;
}

static void
fprint_ccs(FILE *fp, CostCentreStack *ccs, nat max_length)
{
    char buf[max_length+1], *p, *buf_end;

    // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
    if (ccs == CCS_MAIN) {
        fprintf(fp, "MAIN");
        return;
    }

    fprintf(fp, "(%ld)", ccs->ccsID);

    p = buf;
    buf_end = buf + max_length + 1;

    // keep printing components of the stack until we run out of space
    // in the buffer.  If we run out of space, end with "...".
    for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {

        // CAF cost centres print as M.CAF, but we leave the module
        // name out of all the others to save space.
        if (!strcmp(ccs->cc->label,"CAF")) {
            p += buf_append(p, ccs->cc->module, buf_end);
            p += buf_append(p, ".CAF", buf_end);
        } else {
            p += buf_append(p, ccs->cc->label, buf_end);
            if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
                p += buf_append(p, "/", buf_end);
            }
        }

        if (p >= buf_end) {
            sprintf(buf+max_length-4, "...");
            break;
        }
    }
    fprintf(fp, "%s", buf);
}

rtsBool
strMatchesSelector( char* str, char* sel )
{
    char* p;
    // debugBelch("str_matches_selector %s %s\n", str, sel);
    while (1) {
        // Compare str against wherever we've got to in sel.
        p = str;
        while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
            p++; sel++;
        }
        // Match if all of str used and have reached the end of a sel fragment.
        if (*p == '\0' && (*sel == ',' || *sel == '\0'))
            return rtsTrue;

        // No match.  Advance sel to the start of the next elem.
        while (*sel != ',' && *sel != '\0') sel++;
        if (*sel == ',') sel++;

        /* Run out of sel ?? */
        if (*sel == '\0') return rtsFalse;
    }
}
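
// e.g. strMatchesSelector("MAIN", "MAIN,CAF") == rtsTrue, because the
// whole of str is consumed exactly at a fragment boundary, whereas
// strMatchesSelector("MAI", "MAIN,CAF") == rtsFalse: str must equal an
// entire comma-separated fragment of sel, not merely a prefix of one.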

#endif /* PROFILING */

/* -----------------------------------------------------------------------------
 * Figure out whether a closure should be counted in this census, by
 * testing against all the specified constraints.
 * -------------------------------------------------------------------------- */
static rtsBool
closureSatisfiesConstraints( StgClosure* p )
{
#if !defined(PROFILING)
    (void)p; /* keep gcc -Wall happy */
    return rtsTrue;
#else
    rtsBool b;

    // The CCS has a selected field to indicate whether this closure is
    // deselected by not being mentioned in the module, CC, or CCS
    // selectors.
    if (!p->header.prof.ccs->selected) {
        return rtsFalse;
    }

    if (RtsFlags.ProfFlags.descrSelector) {
        b = strMatchesSelector( (GET_PROF_DESC(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.descrSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.typeSelector) {
        b = strMatchesSelector( (GET_PROF_TYPE(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.typeSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.retainerSelector) {
        RetainerSet *rs;
        nat i;
        // We must check that the retainer set is valid here.  One
        // reason it might not be valid is if this closure is a
        // newly deceased weak pointer (i.e. a DEAD_WEAK), since
        // these aren't reached by the retainer profiler's traversal.
        if (isRetainerSetFieldValid((StgClosure *)p)) {
            rs = retainerSetOf((StgClosure *)p);
            if (rs != NULL) {
                for (i = 0; i < rs->num; i++) {
                    b = strMatchesSelector( rs->element[i]->cc->label,
                                            RtsFlags.ProfFlags.retainerSelector );
                    if (b) return rtsTrue;
                }
            }
        }
        return rtsFalse;
    }
    return rtsTrue;
#endif /* PROFILING */
}

/* -----------------------------------------------------------------------------
 * Aggregate the heap census info for biographical profiling
 * -------------------------------------------------------------------------- */
#ifdef PROFILING
static void
aggregateCensusInfo( void )
{
    HashTable *acc;
    nat t;
    counter *c, *d, *ctrs;
    Arena *arena;

    if (!doingLDVProfiling()) return;

    // Aggregate the LDV counters when displaying by biography.
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        int void_total, drag_total;

        // Now we compute void_total and drag_total for each census.
        // After the program has finished, the void_total field of
        // each census contains the count of words that were *created*
        // in this era and were eventually void.  Conversely, if a
        // void closure was destroyed in this era, it will be
        // represented by a negative count of words in void_total.
        //
        // To get the count of live words that are void at each
        // census, just propagate the void_total count forwards:
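        //
        // For example, 100 words created in era 2 that die unused in
        // era 5 appear as censuses[2].void_total = +100 and
        // censuses[5].void_total = -100; the running sum below then
        // reports 100 void words live at censuses 2..4 and none from
        // census 5 onwards.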

        void_total = 0;
        drag_total = 0;
        for (t = 1; t < era; t++) { // note: start at 1, not 0
            void_total += censuses[t].void_total;
            drag_total += censuses[t].drag_total;
            censuses[t].void_total = void_total;
            censuses[t].drag_total = drag_total;

            ASSERT( censuses[t].void_total <= censuses[t].not_used );
            // should be true because: void_total is the count of
            // live words that are void at this census, which *must*
            // be less than the number of live words that have not
            // been used yet.

            ASSERT( censuses[t].drag_total <= censuses[t].used );
            // similar reasoning as above.
        }

        return;
    }

    // otherwise... we're doing a heap profile that is restricted to
    // some combination of lag, drag, void or use.  We've kept all the
    // census info for all censuses so far, but we still need to
    // aggregate the counters forwards.

    arena = newArena();
    acc = allocHashTable();
    ctrs = NULL;

    for (t = 1; t < era; t++) {

        // first look through all the counters we're aggregating
        for (c = ctrs; c != NULL; c = c->next) {
            // if one of the totals is non-zero, then this closure
            // type must be present in the heap at this census time...
            d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);

            if (d == NULL) {
                // if this closure identity isn't present in the
                // census for this time period, then our running
                // totals *must* be zero.
                ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);

                // debugCCS(c->identity);
                // debugBelch(" census=%d void_total=%d drag_total=%d\n",
                //            t, c->c.ldv.void_total, c->c.ldv.drag_total);
            } else {
                d->c.ldv.void_total += c->c.ldv.void_total;
                d->c.ldv.drag_total += c->c.ldv.drag_total;
                c->c.ldv.void_total =  d->c.ldv.void_total;
                c->c.ldv.drag_total =  d->c.ldv.drag_total;

                ASSERT( c->c.ldv.void_total >= 0 );
                ASSERT( c->c.ldv.drag_total >= 0 );
            }
        }

        // now look through the counters in this census to find new ones
        for (c = censuses[t].ctrs; c != NULL; c = c->next) {
            d = lookupHashTable(acc, (StgWord)c->identity);
            if (d == NULL) {
                d = arenaAlloc( arena, sizeof(counter) );
                initLDVCtr(d);
                insertHashTable( acc, (StgWord)c->identity, d );
                d->identity = c->identity;
                d->next = ctrs;
                ctrs = d;
                d->c.ldv.void_total = c->c.ldv.void_total;
                d->c.ldv.drag_total = c->c.ldv.drag_total;
            }
            ASSERT( c->c.ldv.void_total >= 0 );
            ASSERT( c->c.ldv.drag_total >= 0 );
        }
    }

    freeHashTable(acc, NULL);
    arenaFree(arena);
}
#endif

/* -----------------------------------------------------------------------------
 * Print out the results of a heap census.
 * -------------------------------------------------------------------------- */
static void
dumpCensus( Census *census )
{
    counter *ctr;
    int count;

    printSample(rtsTrue, census->time);

#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        fprintf(hp_file, "VOID\t%lu\n",
                (unsigned long)(census->void_total) * sizeof(W_));
        fprintf(hp_file, "LAG\t%lu\n",
                (unsigned long)(census->not_used - census->void_total) * sizeof(W_));
        fprintf(hp_file, "USE\t%lu\n",
                (unsigned long)(census->used - census->drag_total) * sizeof(W_));
        fprintf(hp_file, "INHERENT_USE\t%lu\n",
                (unsigned long)(census->prim) * sizeof(W_));
        fprintf(hp_file, "DRAG\t%lu\n",
                (unsigned long)(census->drag_total) * sizeof(W_));
        printSample(rtsFalse, census->time);
        return;
    }
#endif

    for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {

#ifdef PROFILING
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            count = 0;
            if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
            if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.drag_total;
            if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.void_total;
            if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
        } else
#endif
        {
            count = ctr->c.resid;
        }

        ASSERT( count >= 0 );

        if (count == 0) continue;

#if !defined(PROFILING)
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CLOSURE_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        }
#endif

#ifdef PROFILING
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CCS:
            fprint_ccs(hp_file, (CostCentreStack *)ctr->identity,
                       RtsFlags.ProfFlags.ccsLength);
            break;
        case HEAP_BY_MOD:
        case HEAP_BY_DESCR:
        case HEAP_BY_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        case HEAP_BY_RETAINER:
        {
            RetainerSet *rs = (RetainerSet *)ctr->identity;

            // it might be the distinguished retainer set rs_MANY:
            if (rs == &rs_MANY) {
                fprintf(hp_file, "MANY");
                break;
            }

            // Mark this retainer set by negating its id, because it
            // has appeared in at least one census.  We print the
            // values of all such retainer sets into the log file at
            // the end.  A retainer set may exist but not feature in
            // any censuses if it arose as the intermediate retainer
            // set for some closure during retainer set calculation.
            if (rs->id > 0)
                rs->id = -(rs->id);

            // report in the unit of bytes: * sizeof(StgWord)
            printRetainerSetShort(hp_file, rs, RtsFlags.ProfFlags.ccsLength);
            break;
        }
        default:
            barf("dumpCensus; doHeapProfile");
        }
#endif

        fprintf(hp_file, "\t%lu\n", (unsigned long)count * sizeof(W_));
    }

    printSample(rtsFalse, census->time);
}
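
// A census therefore appears in the .hp file as a block like the
// following (the identities shown are illustrative; the counts are
// word counts scaled to bytes by sizeof(W_)):
//
//     BEGIN_SAMPLE 0.00
//     (1)Main.CAF	400
//     (2)insert/Main.CAF	1200
//     END_SAMPLE 0.00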

/* -----------------------------------------------------------------------------
 * Code to perform a heap census.
 * -------------------------------------------------------------------------- */
static void
heapCensusChain( Census *census, bdescr *bd )
{
    StgPtr p;
    StgInfoTable *info;
    void *identity;
    nat size;
    counter *ctr;
    nat real_size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        // HACK: ignore pinned blocks, because they contain gaps.
        // It's not clear exactly what we'd like to do here, since we
        // can't tell which objects in the block are actually alive.
        // Perhaps the whole block should be counted as SYSTEM memory.
        if (bd->flags & BF_PINNED) {
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case IND_PERM:
            case BLACKHOLE:
            case BLOCKING_QUEUE:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
                size = ap_stack_sizeW((StgAP_STACK *)p);
                break;

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW((StgArrWords*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
#ifdef PROFILING
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = sizeofW(StgTSO);
                    break;
                } else {
                    // Skip this TSO and move on to the next object
                    p += sizeofW(StgTSO);
                    continue;
                }
#else
                size = sizeofW(StgTSO);
                break;
#endif

            case STACK:
                prim = rtsTrue;
#ifdef PROFILING
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = stack_sizeW((StgStack*)p);
                    break;
                } else {
                    // Skip this stack and move on to the next object
                    p += stack_sizeW((StgStack*)p);
                    continue;
                }
#else
                size = stack_sizeW((StgStack*)p);
                break;
#endif

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("heapCensus, unknown object: %d", info->type);
            }

            identity = NULL;

#ifdef PROFILING
            // subtract the profiling overhead
            real_size = size - sizeofW(StgProfHeader);
#else
            real_size = size;
#endif

            if (closureSatisfiesConstraints((StgClosure*)p)) {
#ifdef PROFILING
                if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
                    if (prim)
                        census->prim += real_size;
                    else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                        census->not_used += real_size;
                    else
                        census->used += real_size;
                } else
#endif
                {
                    identity = closureIdentity((StgClosure *)p);

                    if (identity != NULL) {
                        ctr = lookupHashTable( census->hash, (StgWord)identity );
                        if (ctr != NULL) {
#ifdef PROFILING
                            if (RtsFlags.ProfFlags.bioSelector != NULL) {
                                if (prim)
                                    ctr->c.ldv.prim += real_size;
                                else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                                    ctr->c.ldv.not_used += real_size;
                                else
                                    ctr->c.ldv.used += real_size;
                            } else
#endif
                            {
                                ctr->c.resid += real_size;
                            }
                        } else {
                            ctr = arenaAlloc( census->arena, sizeof(counter) );
                            initLDVCtr(ctr);
                            insertHashTable( census->hash, (StgWord)identity, ctr );
                            ctr->identity = identity;
                            ctr->next = census->ctrs;
                            census->ctrs = ctr;

#ifdef PROFILING
                            if (RtsFlags.ProfFlags.bioSelector != NULL) {
                                if (prim)
                                    ctr->c.ldv.prim = real_size;
                                else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                                    ctr->c.ldv.not_used = real_size;
                                else
                                    ctr->c.ldv.used = real_size;
                            } else
#endif
                            {
                                ctr->c.resid = real_size;
                            }
                        }
                    }
                }
            }

            p += size;
        }
    }
}

void
heapCensus( void )
{
    nat g, n;
    Census *census;
    gen_workspace *ws;

    census = &censuses[era];
    census->time = mut_user_time();

    // calculate retainer sets if necessary
#ifdef PROFILING
    if (doingRetainerProfiling()) {
        retainerProfile();
    }
#endif

#ifdef PROFILING
    stat_startHeapCensus();
#endif

    // Traverse the heap, collecting the census info
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        heapCensusChain( census, generations[g].blocks );
        // Should we be interested in large objects?  It might be
        // confusing to include the stack in a heap profile.
        heapCensusChain( census, generations[g].large_objects );

        for (n = 0; n < n_capabilities; n++) {
            ws = &gc_threads[n]->gens[g];
            heapCensusChain(census, ws->todo_bd);
            heapCensusChain(census, ws->part_list);
            heapCensusChain(census, ws->scavd_list);
        }
    }

    // dump out the census info
#ifdef PROFILING
    // We can't generate any info for LDV profiling until
    // the end of the run...
    if (!doingLDVProfiling())
        dumpCensus( census );
#else
    dumpCensus( census );
#endif

    // free our storage, unless we're keeping all the census info for
    // future restriction by biography.
#ifdef PROFILING
    if (RtsFlags.ProfFlags.bioSelector == NULL)
    {
        freeHashTable( census->hash, NULL/* don't free the elements */ );
        arenaFree( census->arena );
        census->hash = NULL;
        census->arena = NULL;
    }
#endif

    // we're into the next time period now
    nextEra();

#ifdef PROFILING
    stat_endHeapCensus();
#endif
}