/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2003
 *
 * Support for heap profiling
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Profiling.h"
#include "ProfHeap.h"
#include "Stats.h"
#include "Hash.h"
#include "RetainerProfile.h"
#include "LdvProfile.h"
#include "Arena.h"
#include "Printer.h"
#include "sm/GCThread.h"

#include <string.h>

/* -----------------------------------------------------------------------------
 * era stores the current time period.  It is the same as the
 * number of censuses that have been performed.
 *
 * RESTRICTION:
 *   era must be no longer than LDV_SHIFT (15 or 30) bits.
 * Invariants:
 *   era is initialized to 1 in initHeapProfiling().
 *
 * max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
 * When era reaches max_era, the profiling stops because a closure can
 * store only up to (max_era - 1) as its creation or last use time.
 * -------------------------------------------------------------------------- */
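// For example, with the 15-bit layout max_era is 32768, so at most
// 32767 censuses can be taken before heap profiling stops.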
unsigned int era;
static nat max_era;

/* -----------------------------------------------------------------------------
 * Counters
 *
 * For most heap profiles each closure identity gets a simple count
 * of live words in the heap at each census.  However, if we're
 * selecting by biography, then we have to keep the various
 * lag/drag/void counters for each identity.
 * -------------------------------------------------------------------------- */
typedef struct _counter {
    void *identity;
    union {
        nat resid;
        struct {
            long prim;       // total size of 'inherently used' closures
            long not_used;   // total size of 'never used' closures
            long used;       // total size of 'used at least once' closures
            long void_total; // current total size of 'destroyed without being used' closures
            long drag_total; // current total size of 'used at least once and waiting to die'
        } ldv;
    } c;
    struct _counter *next;
} counter;

STATIC_INLINE void
initLDVCtr( counter *ctr )
{
    ctr->c.ldv.prim = 0;
    ctr->c.ldv.not_used = 0;
    ctr->c.ldv.used = 0;
    ctr->c.ldv.void_total = 0;
    ctr->c.ldv.drag_total = 0;
}

typedef struct {
    double      time;  // the time in MUT time when the census is made
    HashTable * hash;
    counter   * ctrs;
    Arena     * arena;

    // for LDV profiling, when just displaying by LDV
    long prim;
    long not_used;
    long used;
    long void_total;
    long drag_total;
} Census;

static Census *censuses = NULL;
static nat n_censuses = 0;

#ifdef PROFILING
static void aggregateCensusInfo( void );
#endif

static void dumpCensus( Census *census );

static rtsBool closureSatisfiesConstraints( StgClosure* p );

/* ----------------------------------------------------------------------------
 * Find the "closure identity", which is a unique pointer representing
 * the band to which this closure's heap space is attributed in the
 * heap profile.
 * ------------------------------------------------------------------------- */
static void *
closureIdentity( StgClosure *p )
{
    switch (RtsFlags.ProfFlags.doHeapProfile) {

#ifdef PROFILING
    case HEAP_BY_CCS:
        return p->header.prof.ccs;
    case HEAP_BY_MOD:
        return p->header.prof.ccs->cc->module;
    case HEAP_BY_DESCR:
        return GET_PROF_DESC(get_itbl(p));
    case HEAP_BY_TYPE:
        return GET_PROF_TYPE(get_itbl(p));
    case HEAP_BY_RETAINER:
        // AFAIK, the only closures in the heap which might not have a
        // valid retainer set are DEAD_WEAK closures.
        if (isRetainerSetFieldValid(p))
            return retainerSetOf(p);
        else
            return NULL;

#else
    case HEAP_BY_CLOSURE_TYPE:
    {
        StgInfoTable *info;
        info = get_itbl(p);
        switch (info->type) {
        case CONSTR:
        case CONSTR_1_0:
        case CONSTR_0_1:
        case CONSTR_2_0:
        case CONSTR_1_1:
        case CONSTR_0_2:
        case CONSTR_STATIC:
        case CONSTR_NOCAF_STATIC:
            return GET_CON_DESC(itbl_to_con_itbl(info));
        default:
            return closure_type_names[info->type];
        }
    }

#endif
    default:
        barf("closureIdentity");
    }
}

/* --------------------------------------------------------------------------
 * Profiling type predicates
 * ----------------------------------------------------------------------- */
#ifdef PROFILING
STATIC_INLINE rtsBool
doingLDVProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
            || RtsFlags.ProfFlags.bioSelector != NULL);
}

STATIC_INLINE rtsBool
doingRetainerProfiling( void )
{
    return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
            || RtsFlags.ProfFlags.retainerSelector != NULL);
}
#endif /* PROFILING */
// Processes a closure 'c' being destroyed whose size is 'size'.
// Make sure that LDV_recordDead() is not invoked on 'inherently used' closures
// such as TSO; they should not be involved in computing dragNew or voidNew.
//
// Even though era is checked in both LdvCensusForDead() and
// LdvCensusKillAll(), we still need to make sure that era is > 0 because
// LDV_recordDead() may be called from elsewhere in the runtime system.  E.g.,
// when a thunk is replaced by an indirection object.
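//
// The masks used below assume that the LDV word of a closure packs a
// state bit (created vs. used), the creation era (extracted with
// LDV_CREATE_MASK and LDV_SHIFT) and the era of last use (LDV_LAST_MASK);
// see the LDV profiling headers for the authoritative layout.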

#ifdef PROFILING
void
LDV_recordDead( StgClosure *c, nat size )
{
    void *id;
    nat t;
    counter *ctr;

    if (era > 0 && closureSatisfiesConstraints(c)) {
        size -= sizeofW(StgProfHeader);
        ASSERT(LDVW(c) != 0);
        if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
            t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
            if (t < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t].void_total   += (long)size;
                    censuses[era].void_total -= (long)size;
                    ASSERT(censuses[t].void_total < censuses[t].not_used);
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.void_total += (long)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.void_total -= (long)size;
                }
            }
        } else {
            t = LDVW((c)) & LDV_LAST_MASK;
            if (t + 1 < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t+1].drag_total += size;
                    censuses[era].drag_total -= size;
                } else {
                    void *id;
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.drag_total += (long)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.drag_total -= (long)size;
                }
            }
        }
    }
}
#endif

/* --------------------------------------------------------------------------
 * Initialize censuses[era]
 * ----------------------------------------------------------------------- */

STATIC_INLINE void
initEra(Census *census)
{
    census->hash  = allocHashTable();
    census->ctrs  = NULL;
    census->arena = newArena();

    census->not_used   = 0;
    census->used       = 0;
    census->prim       = 0;
    census->void_total = 0;
    census->drag_total = 0;
}

STATIC_INLINE void
freeEra(Census *census)
{
    arenaFree(census->arena);
    freeHashTable(census->hash, NULL);
}

/* --------------------------------------------------------------------------
 * Increases era by 1 and initializes censuses[era].
 * Reallocates censuses[] and increases its size if needed.
 * ----------------------------------------------------------------------- */

static void
nextEra( void )
{
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era++;

        if (era == max_era) {
            errorBelch("Maximum number of censuses reached.");
            if (rtsConfig.rts_opts_suggestions == rtsTrue) {
                if (rtsConfig.rts_opts_enabled == RtsOptsAll) {
                    errorBelch("Use `+RTS -i' to reduce censuses.");
                } else {
                    errorBelch("Relink with -rtsopts and "
                               "use `+RTS -i' to reduce censuses.");
                }
            }
            stg_exit(EXIT_FAILURE);
        }

        if (era == n_censuses) {
            n_censuses *= 2;
            censuses = stgReallocBytes(censuses, sizeof(Census) * n_censuses,
                                       "nextEra");
        }
    }
#endif /* PROFILING */

    initEra( &censuses[era] );
}

/* ----------------------------------------------------------------------------
 * Heap profiling by info table
 * ------------------------------------------------------------------------- */

#if !defined(PROFILING)
FILE *hp_file;
static char *hp_filename;

void initProfiling1 (void)
{
}

void freeProfiling (void)
{
}

void initProfiling2 (void)
{
    char *prog;

    prog = stgMallocBytes(strlen(prog_name) + 1, "initProfiling2");
    strcpy(prog, prog_name);
#ifdef mingw32_HOST_OS
    // on Windows, drop the .exe suffix if there is one
    {
        char *suff;
        suff = strrchr(prog,'.');
        if (suff != NULL && !strcmp(suff,".exe")) {
            *suff = '\0';
        }
    }
#endif

    if (RtsFlags.ProfFlags.doHeapProfile) {
        /* Initialise the log file name */
        hp_filename = stgMallocBytes(strlen(prog) + 6, "hpFileName");
        sprintf(hp_filename, "%s.hp", prog);

        /* open the log file */
        if ((hp_file = fopen(hp_filename, "w")) == NULL) {
            debugBelch("Can't open profiling report file %s\n",
                       hp_filename);
            RtsFlags.ProfFlags.doHeapProfile = 0;
            stgFree(prog);
            return;
        }
    }

    stgFree(prog);

    initHeapProfiling();
}

void endProfiling( void )
{
    endHeapProfiling();
}
#endif /* !PROFILING */

static void
printSample(rtsBool beginSample, StgDouble sampleValue)
{
    fprintf(hp_file, "%s %f\n",
            (beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
            sampleValue);
    if (!beginSample) {
        fflush(hp_file);
    }
}
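
// Each census is bracketed in the .hp log by a BEGIN_SAMPLE/END_SAMPLE
// pair carrying the sample time; post-processors such as hp2ps consume
// this format to draw the heap-profile graph.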

/* --------------------------------------------------------------------------
 * Initialize the heap profiler
 * ----------------------------------------------------------------------- */
nat
initHeapProfiling(void)
{
    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return 0;
    }

#ifdef PROFILING
    if (doingLDVProfiling() && doingRetainerProfiling()) {
        errorBelch("cannot mix -hb and -hr");
        stg_exit(EXIT_FAILURE);
    }
#endif

    // we only count eras if we're doing LDV profiling.  Otherwise era
    // is fixed at zero.
#ifdef PROFILING
    if (doingLDVProfiling()) {
        era = 1;
    } else
#endif
    {
        era = 0;
    }

    // max_era = 2^LDV_SHIFT
    max_era = 1 << LDV_SHIFT;

    n_censuses = 32;
    censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");

    initEra( &censuses[era] );

    /* initProfilingLogFile(); */
    fprintf(hp_file, "JOB \"%s", prog_name);

#ifdef PROFILING
    {
        int count;
        for(count = 1; count < prog_argc; count++)
            fprintf(hp_file, " %s", prog_argv[count]);
        fprintf(hp_file, " +RTS");
        for(count = 0; count < rts_argc; count++)
            fprintf(hp_file, " %s", rts_argv[count]);
    }
#endif /* PROFILING */

    fprintf(hp_file, "\"\n" );

    fprintf(hp_file, "DATE \"%s\"\n", time_str());

    fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
    fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");

    printSample(rtsTrue, 0);
    printSample(rtsFalse, 0);

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        initRetainerProfiling();
    }
#endif

    return 0;
}

void
endHeapProfiling(void)
{
    StgDouble seconds;

    if (! RtsFlags.ProfFlags.doHeapProfile) {
        return;
    }

#ifdef PROFILING
    if (doingRetainerProfiling()) {
        endRetainerProfiling();
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        LdvCensusKillAll();
        aggregateCensusInfo();
        for (t = 1; t < era; t++) {
            dumpCensus( &censuses[t] );
        }
    }
#endif

#ifdef PROFILING
    if (doingLDVProfiling()) {
        nat t;
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            for (t = 1; t <= era; t++) {
                freeEra( &censuses[t] );
            }
        } else {
            freeEra( &censuses[era] );
        }
    } else {
        freeEra( &censuses[0] );
    }
#else
    freeEra( &censuses[0] );
#endif

    stgFree(censuses);

    seconds = mut_user_time();
    printSample(rtsTrue, seconds);
    printSample(rtsFalse, seconds);
    fclose(hp_file);
}


#ifdef PROFILING
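// Append the NUL-terminated string q at position p, writing no further
// than end.  Returns the number of characters copied, not counting the
// terminating NUL, so callers can advance p by the return value and
// overwrite the NUL on the next append; if the buffer fills up first,
// the output is silently truncated.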
static size_t
buf_append(char *p, const char *q, char *end)
{
    int m;

    for (m = 0; p < end; p++, q++, m++) {
        *p = *q;
        if (*q == '\0') { break; }
    }
    return m;
}

static void
fprint_ccs(FILE *fp, CostCentreStack *ccs, nat max_length)
{
    char buf[max_length+1], *p, *buf_end;

    // MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
    if (ccs == CCS_MAIN) {
        fprintf(fp, "MAIN");
        return;
    }

    fprintf(fp, "(%ld)", ccs->ccsID);

    p = buf;
    buf_end = buf + max_length + 1;

    // keep printing components of the stack until we run out of space
    // in the buffer.  If we run out of space, end with "...".
    for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {

        // CAF cost centres print as M.CAF, but we leave the module
        // name out of all the others to save space.
        if (!strcmp(ccs->cc->label,"CAF")) {
            p += buf_append(p, ccs->cc->module, buf_end);
            p += buf_append(p, ".CAF", buf_end);
        } else {
            p += buf_append(p, ccs->cc->label, buf_end);
            if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
                p += buf_append(p, "/", buf_end);
            }
        }

        if (p >= buf_end) {
            sprintf(buf+max_length-4, "...");
            break;
        }
    }
    fprintf(fp, "%s", buf);
}

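// Does str exactly match one element of the comma-separated selector
// list sel?  E.g. strMatchesSelector("drag", "lag,drag") is rtsTrue,
// but strMatchesSelector("dra", "lag,drag") is rtsFalse: a fragment
// must be consumed in full for a match.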
rtsBool
strMatchesSelector( char* str, char* sel )
{
    char* p;
    // debugBelch("str_matches_selector %s %s\n", str, sel);
    while (1) {
        // Compare str against wherever we've got to in sel.
        p = str;
        while (*p != '\0' && *sel != ',' && *sel != '\0' && *p == *sel) {
            p++; sel++;
        }
        // Match if all of str used and have reached the end of a sel fragment.
        if (*p == '\0' && (*sel == ',' || *sel == '\0'))
            return rtsTrue;

        // No match.  Advance sel to the start of the next elem.
        while (*sel != ',' && *sel != '\0') sel++;
        if (*sel == ',') sel++;

        /* Run out of sel ?? */
        if (*sel == '\0') return rtsFalse;
    }
}

#endif /* PROFILING */

/* -----------------------------------------------------------------------------
 * Figure out whether a closure should be counted in this census, by
 * testing against all the specified constraints.
 * -------------------------------------------------------------------------- */
static rtsBool
closureSatisfiesConstraints( StgClosure* p )
{
#if !defined(PROFILING)
    (void)p;   /* keep gcc -Wall happy */
    return rtsTrue;
#else
    rtsBool b;

    // The CCS has a selected field to indicate whether this closure is
    // deselected by not being mentioned in the module, CC, or CCS
    // selectors.
    if (!p->header.prof.ccs->selected) {
        return rtsFalse;
    }

    if (RtsFlags.ProfFlags.descrSelector) {
        b = strMatchesSelector( (GET_PROF_DESC(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.descrSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.typeSelector) {
        b = strMatchesSelector( (GET_PROF_TYPE(get_itbl((StgClosure *)p))),
                                RtsFlags.ProfFlags.typeSelector );
        if (!b) return rtsFalse;
    }
    if (RtsFlags.ProfFlags.retainerSelector) {
        RetainerSet *rs;
        nat i;
        // We must check that the retainer set is valid here.  One
        // reason it might not be valid is if this closure is a
        // newly deceased weak pointer (i.e. a DEAD_WEAK), since
        // these aren't reached by the retainer profiler's traversal.
        if (isRetainerSetFieldValid((StgClosure *)p)) {
            rs = retainerSetOf((StgClosure *)p);
            if (rs != NULL) {
                for (i = 0; i < rs->num; i++) {
                    b = strMatchesSelector( rs->element[i]->cc->label,
                                            RtsFlags.ProfFlags.retainerSelector );
                    if (b) return rtsTrue;
                }
            }
        }
        return rtsFalse;
    }
    return rtsTrue;
#endif /* PROFILING */
}

/* -----------------------------------------------------------------------------
 * Aggregate the heap census info for biographical profiling
 * -------------------------------------------------------------------------- */
#ifdef PROFILING
static void
aggregateCensusInfo( void )
{
    HashTable *acc;
    nat t;
    counter *c, *d, *ctrs;
    Arena *arena;

    if (!doingLDVProfiling()) return;

    // Aggregate the LDV counters when displaying by biography.
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        long void_total, drag_total;

        // Now we compute void_total and drag_total for each census.
        // After the program has finished, the void_total field of
        // each census contains the count of words that were *created*
        // in this era and were eventually void.  Conversely, if a
        // void closure was destroyed in this era, it will be
        // represented by a negative count of words in void_total.
        //
        // To get the count of live words that are void at each
        // census, just propagate the void_total count forwards:
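        //
        // For example, a 10-word closure created at census 3 and
        // destroyed without ever being used at census 7 contributes
        // +10 to censuses[3].void_total and -10 to
        // censuses[7].void_total (see LDV_recordDead()); the running
        // sum below is therefore 10 for censuses 3..6 and 0 from
        // census 7 onwards, i.e. the live void words at each census.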

        void_total = 0;
        drag_total = 0;
        for (t = 1; t < era; t++) { // note: start at 1, not 0
            void_total += censuses[t].void_total;
            drag_total += censuses[t].drag_total;
            censuses[t].void_total = void_total;
            censuses[t].drag_total = drag_total;

            ASSERT( censuses[t].void_total <= censuses[t].not_used );
            // should be true because: void_total is the count of
            // live words that are void at this census, which *must*
            // be less than the number of live words that have not
            // been used yet.

            ASSERT( censuses[t].drag_total <= censuses[t].used );
            // similar reasoning as above.
        }

        return;
    }

    // otherwise... we're doing a heap profile that is restricted to
    // some combination of lag, drag, void or use.  We've kept all the
    // census info for all censuses so far, but we still need to
    // aggregate the counters forwards.

    arena = newArena();
    acc = allocHashTable();
    ctrs = NULL;

    for (t = 1; t < era; t++) {

        // first look through all the counters we're aggregating
        for (c = ctrs; c != NULL; c = c->next) {
            // if one of the totals is non-zero, then this closure
            // type must be present in the heap at this census time...
            d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);

            if (d == NULL) {
                // if this closure identity isn't present in the
                // census for this time period, then our running
                // totals *must* be zero.
                ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);

                // debugCCS(c->identity);
                // debugBelch(" census=%d void_total=%d drag_total=%d\n",
                //            t, c->c.ldv.void_total, c->c.ldv.drag_total);
            } else {
                d->c.ldv.void_total += c->c.ldv.void_total;
                d->c.ldv.drag_total += c->c.ldv.drag_total;
                c->c.ldv.void_total =  d->c.ldv.void_total;
                c->c.ldv.drag_total =  d->c.ldv.drag_total;

                ASSERT( c->c.ldv.void_total >= 0 );
                ASSERT( c->c.ldv.drag_total >= 0 );
            }
        }

        // now look through the counters in this census to find new ones
        for (c = censuses[t].ctrs; c != NULL; c = c->next) {
            d = lookupHashTable(acc, (StgWord)c->identity);
            if (d == NULL) {
                d = arenaAlloc( arena, sizeof(counter) );
                initLDVCtr(d);
                insertHashTable( acc, (StgWord)c->identity, d );
                d->identity = c->identity;
                d->next = ctrs;
                ctrs = d;
                d->c.ldv.void_total = c->c.ldv.void_total;
                d->c.ldv.drag_total = c->c.ldv.drag_total;
            }
            ASSERT( c->c.ldv.void_total >= 0 );
            ASSERT( c->c.ldv.drag_total >= 0 );
        }
    }

    freeHashTable(acc, NULL);
    arenaFree(arena);
}
#endif

/* -----------------------------------------------------------------------------
 * Print out the results of a heap census.
 * -------------------------------------------------------------------------- */
static void
dumpCensus( Census *census )
{
    counter *ctr;
    long count;

    printSample(rtsTrue, census->time);

#ifdef PROFILING
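    // In biographical (-hb style) LDV output the five bands partition
    // the live words at this census: VOID and LAG split the
    // not-yet-used words (LAG = not_used - void_total), USE and DRAG
    // split the already-used words (USE = used - drag_total), and
    // INHERENT_USE reports the 'inherently used' closures counted in
    // census->prim.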
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
        fprintf(hp_file, "VOID\t%lu\n",
                (unsigned long)(census->void_total) * sizeof(W_));
        fprintf(hp_file, "LAG\t%lu\n",
                (unsigned long)(census->not_used - census->void_total) * sizeof(W_));
        fprintf(hp_file, "USE\t%lu\n",
                (unsigned long)(census->used - census->drag_total) * sizeof(W_));
        fprintf(hp_file, "INHERENT_USE\t%lu\n",
                (unsigned long)(census->prim) * sizeof(W_));
        fprintf(hp_file, "DRAG\t%lu\n",
                (unsigned long)(census->drag_total) * sizeof(W_));
        printSample(rtsFalse, census->time);
        return;
    }
#endif

    for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {

#ifdef PROFILING
        if (RtsFlags.ProfFlags.bioSelector != NULL) {
            count = 0;
            if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
            if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.drag_total;
            if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.void_total;
            if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
                count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
        } else
#endif
        {
            count = ctr->c.resid;
        }

        ASSERT( count >= 0 );

        if (count == 0) continue;

#if !defined(PROFILING)
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CLOSURE_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        }
#endif

#ifdef PROFILING
        switch (RtsFlags.ProfFlags.doHeapProfile) {
        case HEAP_BY_CCS:
            fprint_ccs(hp_file, (CostCentreStack *)ctr->identity,
                       RtsFlags.ProfFlags.ccsLength);
            break;
        case HEAP_BY_MOD:
        case HEAP_BY_DESCR:
        case HEAP_BY_TYPE:
            fprintf(hp_file, "%s", (char *)ctr->identity);
            break;
        case HEAP_BY_RETAINER:
        {
            RetainerSet *rs = (RetainerSet *)ctr->identity;

            // it might be the distinguished retainer set rs_MANY:
            if (rs == &rs_MANY) {
                fprintf(hp_file, "MANY");
                break;
            }

            // Mark this retainer set by negating its id, because it
            // has appeared in at least one census.  We print the
            // values of all such retainer sets into the log file at
            // the end.  A retainer set may exist but not feature in
            // any censuses if it arose as the intermediate retainer
            // set for some closure during retainer set calculation.
            if (rs->id > 0)
                rs->id = -(rs->id);

            // report in the unit of bytes: * sizeof(StgWord)
            printRetainerSetShort(hp_file, rs, RtsFlags.ProfFlags.ccsLength);
            break;
        }
        default:
            barf("dumpCensus; doHeapProfile");
        }
#endif

        fprintf(hp_file, "\t%" FMT_SizeT "\n", (W_)count * sizeof(W_));
    }

    printSample(rtsFalse, census->time);
}


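// Attribute one closure of 'size' words to the census.  In plain LDV
// mode (HEAP_BY_LDV) the size goes into the census-wide
// prim/not_used/used totals; in all other modes it is accumulated
// against the closure's identity in the census hash table, allocating
// a fresh counter the first time an identity is seen.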
static void heapProfObject(Census *census, StgClosure *p, nat size,
                           rtsBool prim
#ifndef PROFILING
                           STG_UNUSED
#endif
                           )
{
    void *identity;
    nat real_size;
    counter *ctr;

    identity = NULL;

#ifdef PROFILING
    // subtract the profiling overhead
    real_size = size - sizeofW(StgProfHeader);
#else
    real_size = size;
#endif

    if (closureSatisfiesConstraints((StgClosure*)p)) {
#ifdef PROFILING
        if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
            if (prim)
                census->prim += real_size;
            else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                census->not_used += real_size;
            else
                census->used += real_size;
        } else
#endif
        {
            identity = closureIdentity((StgClosure *)p);

            if (identity != NULL) {
                ctr = lookupHashTable( census->hash, (StgWord)identity );
                if (ctr != NULL) {
#ifdef PROFILING
                    if (RtsFlags.ProfFlags.bioSelector != NULL) {
                        if (prim)
                            ctr->c.ldv.prim += real_size;
                        else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                            ctr->c.ldv.not_used += real_size;
                        else
                            ctr->c.ldv.used += real_size;
                    } else
#endif
                    {
                        ctr->c.resid += real_size;
                    }
                } else {
                    ctr = arenaAlloc( census->arena, sizeof(counter) );
                    initLDVCtr(ctr);
                    insertHashTable( census->hash, (StgWord)identity, ctr );
                    ctr->identity = identity;
                    ctr->next = census->ctrs;
                    census->ctrs = ctr;

#ifdef PROFILING
                    if (RtsFlags.ProfFlags.bioSelector != NULL) {
                        if (prim)
                            ctr->c.ldv.prim = real_size;
                        else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
                            ctr->c.ldv.not_used = real_size;
                        else
                            ctr->c.ldv.used = real_size;
                    } else
#endif
                    {
                        ctr->c.resid = real_size;
                    }
                }
            }
        }
    }
}

/* -----------------------------------------------------------------------------
 * Code to perform a heap census.
 * -------------------------------------------------------------------------- */
static void
heapCensusChain( Census *census, bdescr *bd )
{
    StgPtr p;
    StgInfoTable *info;
    nat size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        // HACK: pretend a pinned block is just one big ARR_WORDS
        // owned by CCS_PINNED.  These blocks can be full of holes due
        // to alignment constraints so we can't traverse the memory
        // and do a proper census.
        if (bd->flags & BF_PINNED) {
            StgClosure arr;
            SET_HDR(&arr, &stg_ARR_WORDS_info, CCS_PINNED);
            heapProfObject(census, &arr, bd->blocks * BLOCK_SIZE_W, rtsTrue);
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case IND_PERM:
            case BLACKHOLE:
            case BLOCKING_QUEUE:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case TVAR:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
                size = ap_stack_sizeW((StgAP_STACK *)p);
                break;

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW((StgArrBytes*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case SMALL_MUT_ARR_PTRS_CLEAN:
            case SMALL_MUT_ARR_PTRS_DIRTY:
            case SMALL_MUT_ARR_PTRS_FROZEN:
            case SMALL_MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
#ifdef PROFILING
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = sizeofW(StgTSO);
                    break;
                } else {
                    // Skip this TSO and move on to the next object
                    p += sizeofW(StgTSO);
                    continue;
                }
#else
                size = sizeofW(StgTSO);
                break;
#endif

            case STACK:
                prim = rtsTrue;
#ifdef PROFILING
                if (RtsFlags.ProfFlags.includeTSOs) {
                    size = stack_sizeW((StgStack*)p);
                    break;
                } else {
                    // Skip this STACK and move on to the next object
                    p += stack_sizeW((StgStack*)p);
                    continue;
                }
#else
                size = stack_sizeW((StgStack*)p);
                break;
#endif

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("heapCensus, unknown object: %d", info->type);
            }

            heapProfObject(census,(StgClosure*)p,size,prim);

            p += size;
        }
    }
}

void heapCensus (Time t)
{
    nat g, n;
    Census *census;
    gen_workspace *ws;

    census = &censuses[era];
    census->time = mut_user_time_until(t);

    // calculate retainer sets if necessary
#ifdef PROFILING
    if (doingRetainerProfiling()) {
        retainerProfile();
    }
#endif

#ifdef PROFILING
    stat_startHeapCensus();
#endif

    // Traverse the heap, collecting the census info
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        heapCensusChain( census, generations[g].blocks );
        // Are we interested in large objects?  might be
        // confusing to include the stack in a heap profile.
        heapCensusChain( census, generations[g].large_objects );

        for (n = 0; n < n_capabilities; n++) {
            ws = &gc_threads[n]->gens[g];
            heapCensusChain(census, ws->todo_bd);
            heapCensusChain(census, ws->part_list);
            heapCensusChain(census, ws->scavd_list);
        }
    }

    // dump out the census info
#ifdef PROFILING
    // We can't generate any info for LDV profiling until
    // the end of the run...
    if (!doingLDVProfiling())
        dumpCensus( census );
#else
    dumpCensus( census );
#endif


    // free our storage, unless we're keeping all the census info for
    // future restriction by biography.
#ifdef PROFILING
    if (RtsFlags.ProfFlags.bioSelector == NULL)
    {
        freeEra(census);
        census->hash = NULL;
        census->arena = NULL;
    }
#endif

    // we're into the next time period now
    nextEra();

#ifdef PROFILING
    stat_endHeapCensus();
#endif
}