/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2005
 *
 * Statistics and timing-related functions.
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "MBlock.h"
#include "Storage.h"
#include "Schedule.h"
#include "Stats.h"
#include "ParTicky.h"       /* ToDo: move into Rts.h */
#include "Profiling.h"
#include "GetTime.h"
#include "GC.h"
#include "GCUtils.h"
#include "Evac.h"

#if USE_PAPI
#include "Papi.h"
#endif

/* size of the scratch buffer used when formatting numbers below */
#define BIG_STRING_LEN              512

#define TICK_TO_DBL(t) ((double)(t) / TICKS_PER_SECOND)

static Ticks ElapsedTimeStart = 0;

static Ticks InitUserTime     = 0;
static Ticks InitElapsedTime  = 0;
static Ticks InitElapsedStamp = 0;

static Ticks MutUserTime      = 0;
static Ticks MutElapsedTime   = 0;
static Ticks MutElapsedStamp  = 0;

static Ticks ExitUserTime     = 0;
static Ticks ExitElapsedTime  = 0;

static ullong GC_tot_alloc  = 0;
static ullong GC_tot_copied = 0;

static Ticks GC_start_time = 0,  GC_tot_time  = 0;  /* User GC Time */
static Ticks GCe_start_time = 0, GCe_tot_time = 0;  /* Elapsed GC time */

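/* Rough accounting identity maintained by the functions below and reported
 * in stat_exit():
 *
 *     total CPU time  ~=  INIT + MUT + GC + EXIT
 *                         (plus retainer-profiling and heap-census time
 *                          when PROFILING is on)
 *
 * and similarly for elapsed time.  The MUT figures are obtained by
 * subtraction rather than measured directly (see stat_startExit()).
 */
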
#ifdef PROFILING
static Ticks RP_start_time  = 0, RP_tot_time  = 0;  /* retainer prof user time */
static Ticks RPe_start_time = 0, RPe_tot_time = 0;  /* retainer prof elap time */

static Ticks HC_start_time, HC_tot_time = 0;     // heap census prof user time
static Ticks HCe_start_time, HCe_tot_time = 0;   // heap census prof elap time
#endif

#ifdef PROFILING
#define PROF_VAL(x)   (x)
#else
#define PROF_VAL(x)   0
#endif

static lnat MaxResidency     = 0; // in words; for stats only
static lnat AvgResidency     = 0;
static lnat ResidencySamples = 0; // for stats only

static lnat GC_start_faults = 0, GC_end_faults = 0;

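/* Per-generation accumulators for CPU and elapsed GC time: allocated and
 * zeroed in initStats(), indexed by generation number, and reported per
 * generation in stat_exit(). */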
static Ticks *GC_coll_times;
static Ticks *GC_coll_etimes;

static void statsFlush( void );
static void statsClose( void );

Ticks stat_getElapsedGCTime(void)
{
    return GCe_tot_time;
}

Ticks stat_getElapsedTime(void)
{
    return getProcessElapsedTime() - ElapsedTimeStart;
}

/* mut_user_time_during_GC() and mut_user_time()
 *
 * The former can be used to get the current mutator time *during* a GC,
 * i.e. between stat_startGC and stat_endGC.  This is used by the heap
 * profiler to time-stamp a heap sample accurately.
 *
 * ATTENTION: mut_user_time_during_GC() relies on GC_start_time having been
 * set in stat_startGC().  To minimise system calls, GC_start_time is only
 * set when it is really needed (see stat_startGC() for details).
 */
double
mut_user_time_during_GC( void )
{
    return TICK_TO_DBL(GC_start_time - GC_tot_time - PROF_VAL(RP_tot_time + HC_tot_time));
}

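/* mut_user_time(): CPU time used so far minus the time spent in GC (and,
 * when profiling, minus retainer-profiling and heap-census time), i.e. the
 * time attributable to the mutator.  Note that INIT time is not subtracted
 * here. */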
double
mut_user_time( void )
{
    Ticks user;
    user = getProcessCPUTime();
    return TICK_TO_DBL(user - GC_tot_time - PROF_VAL(RP_tot_time + HC_tot_time));
}

#ifdef PROFILING
/*
  mut_user_time_during_RP() is similar to mut_user_time_during_GC();
  it returns the MUT time during retainer profiling.
  The same applies to mut_user_time_during_heap_census().
*/
double
mut_user_time_during_RP( void )
{
    return TICK_TO_DBL(RP_start_time - GC_tot_time - RP_tot_time - HC_tot_time);
}

double
mut_user_time_during_heap_census( void )
{
    return TICK_TO_DBL(HC_start_time - GC_tot_time - RP_tot_time - HC_tot_time);
}
#endif /* PROFILING */

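/* initStats() prints the column headers for the verbose per-GC statistics
 * (when enabled) and allocates the per-generation GC time accumulators
 * declared above. */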
void
initStats(void)
{
    nat i;

    if (RtsFlags.GcFlags.giveStats >= VERBOSE_GC_STATS) {
        statsPrintf("    Alloc    Copied     Live    GC    GC     TOT     TOT  Page Flts\n");
        statsPrintf("    bytes     bytes     bytes  user  elap    user    elap\n");
    }
    GC_coll_times =
        (Ticks *)stgMallocBytes(
            sizeof(Ticks)*RtsFlags.GcFlags.generations,
            "initStats");
    GC_coll_etimes =
        (Ticks *)stgMallocBytes(
            sizeof(Ticks)*RtsFlags.GcFlags.generations,
            "initStats");
    for (i = 0; i < RtsFlags.GcFlags.generations; i++) {
        GC_coll_times[i] = 0;
        GC_coll_etimes[i] = 0;
    }
}

/* -----------------------------------------------------------------------------
   Initialisation time...
   -------------------------------------------------------------------------- */

void
stat_startInit(void)
{
    Ticks elapsed;

    elapsed = getProcessElapsedTime();
    ElapsedTimeStart = elapsed;
}

void
stat_endInit(void)
{
    Ticks user, elapsed;

    getProcessTimes(&user, &elapsed);

    InitUserTime = user;
    InitElapsedStamp = elapsed;
    if (ElapsedTimeStart > elapsed) {
        InitElapsedTime = 0;
    } else {
        InitElapsedTime = elapsed - ElapsedTimeStart;
    }
#if USE_PAPI
    /* We start counting events for the mutator; when garbage collection
     * starts we switch to the GC event set. */
    papi_start_mutator_count();

    /* This flag is needed to avoid counting the last GC */
    papi_is_reporting = 1;

#endif
}

/* -----------------------------------------------------------------------------
   stat_startExit and stat_endExit

   These two measure the time taken in shutdownHaskell().
   -------------------------------------------------------------------------- */

void
stat_startExit(void)
{
    Ticks user, elapsed;

    getProcessTimes(&user, &elapsed);

    MutElapsedStamp = elapsed;
    MutElapsedTime = elapsed - GCe_tot_time -
        PROF_VAL(RPe_tot_time + HCe_tot_time) - InitElapsedStamp;
    if (MutElapsedTime < 0) { MutElapsedTime = 0; }     /* sometimes -0.00 */

    MutUserTime = user - GC_tot_time - PROF_VAL(RP_tot_time + HC_tot_time) - InitUserTime;
    if (MutUserTime < 0) { MutUserTime = 0; }

#if USE_PAPI
    /* We stop counting mutator events; GC events are not being counted at
     * this point. */
    papi_stop_mutator_count();

    /* This flag is needed because the GC is run once more after this
     * function. */
    papi_is_reporting = 0;

#endif
}

void
stat_endExit(void)
{
    Ticks user, elapsed;

    getProcessTimes(&user, &elapsed);

    ExitUserTime = user - MutUserTime - GC_tot_time - PROF_VAL(RP_tot_time + HC_tot_time) - InitUserTime;
    ExitElapsedTime = elapsed - MutElapsedStamp;
    if (ExitUserTime < 0) {
        ExitUserTime = 0;
    }
    if (ExitElapsedTime < 0) {
        ExitElapsedTime = 0;
    }
}

/* -----------------------------------------------------------------------------
   Called at the beginning of each GC
   -------------------------------------------------------------------------- */
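
/* stat_startGC() records the CPU time, the elapsed time and (when GC stats
 * are being collected) the page-fault count at the start of a collection;
 * stat_endGC() computes the corresponding deltas and accumulates them into
 * the per-generation and global totals.  Note that stat_endGC()'s alloc,
 * live and copied arguments are in words; they are converted to bytes
 * (multiplied by sizeof(W_)) for reporting. */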

static nat rub_bell = 0;

/* initialise global variables needed during GC
 *
 * * GC_start_time is read in mut_user_time_during_GC(), which in turn is
 *   needed if either PROFILING or DEBUG is enabled
 */
void
stat_startGC(void)
{
    nat bell = RtsFlags.GcFlags.ringBell;

    if (bell) {
        if (bell > 1) {
            debugBelch(" GC ");
            rub_bell = 1;
        } else {
            debugBelch("\007");
        }
    }

#if defined(PROFILING) || defined(DEBUG)
    GC_start_time = getProcessCPUTime();  // needed in mut_user_time_during_GC()
#endif

    if (RtsFlags.GcFlags.giveStats != NO_GC_STATS) {
#if !defined(PROFILING) && !defined(DEBUG)
        GC_start_time = getProcessCPUTime();
#endif
        GCe_start_time = getProcessElapsedTime();
        if (RtsFlags.GcFlags.giveStats) {
            GC_start_faults = getPageFaults();
        }
    }

#if USE_PAPI
    if (papi_is_reporting) {
        /* Switch to counting GC events */
        papi_stop_mutator_count();
        papi_start_gc_count();
    }
#endif

}

/* -----------------------------------------------------------------------------
   Called at the end of each GC
   -------------------------------------------------------------------------- */

void
stat_endGC (lnat alloc, lnat live, lnat copied, lnat gen)
{
    if (RtsFlags.GcFlags.giveStats != NO_GC_STATS) {
        Ticks time, etime, gc_time, gc_etime;

        getProcessTimes(&time, &etime);
        gc_time  = time - GC_start_time;
        gc_etime = etime - GCe_start_time;

        if (RtsFlags.GcFlags.giveStats == VERBOSE_GC_STATS) {
            nat faults = getPageFaults();

            statsPrintf("%9ld %9ld %9ld",
                        alloc*sizeof(W_), copied*sizeof(W_),
                        live*sizeof(W_));
            statsPrintf(" %5.2f %5.2f %7.2f %7.2f %4ld %4ld  (Gen: %2ld)\n",
                        TICK_TO_DBL(gc_time),
                        TICK_TO_DBL(gc_etime),
                        TICK_TO_DBL(time),
                        TICK_TO_DBL(etime - ElapsedTimeStart),
                        faults - GC_start_faults,
                        GC_start_faults - GC_end_faults,
                        gen);

            GC_end_faults = faults;
            statsFlush();
        }

        GC_coll_times[gen] += gc_time;
        GC_coll_etimes[gen] += gc_etime;

        GC_tot_copied += (ullong) copied;
        GC_tot_alloc  += (ullong) alloc;
        GC_tot_time   += gc_time;
        GCe_tot_time  += gc_etime;

#if defined(THREADED_RTS)
        {
            Task *task;
            if ((task = myTask()) != NULL) {
                task->gc_time  += gc_time;
                task->gc_etime += gc_etime;
            }
        }
#endif

        if (gen == RtsFlags.GcFlags.generations-1) { /* major GC? */
            if (live > MaxResidency) {
                MaxResidency = live;
            }
            ResidencySamples++;
            AvgResidency += live;
        }
    }

    if (rub_bell) {
        debugBelch("\b\b\b  \b\b\b");
        rub_bell = 0;
    }

#if USE_PAPI
    if (papi_is_reporting) {
        /* Switch to counting mutator events */
        if (gen == 0) {
            papi_stop_gc0_count();
        } else {
            papi_stop_gc1_count();
        }
        papi_start_mutator_count();
    }
#endif
}

/* -----------------------------------------------------------------------------
   Called at the beginning of each Retainer Profiling
   -------------------------------------------------------------------------- */
#ifdef PROFILING
void
stat_startRP(void)
{
    Ticks user, elapsed;
    getProcessTimes( &user, &elapsed );

    RP_start_time = user;
    RPe_start_time = elapsed;
}
#endif /* PROFILING */

/* -----------------------------------------------------------------------------
   Called at the end of each Retainer Profiling
   -------------------------------------------------------------------------- */

#ifdef PROFILING
void
stat_endRP(
  nat retainerGeneration,
#ifdef DEBUG_RETAINER
  nat maxCStackSize,
  int maxStackSize,
#endif
  double averageNumVisit)
{
    Ticks user, elapsed;
    getProcessTimes( &user, &elapsed );

    RP_tot_time += user - RP_start_time;
    RPe_tot_time += elapsed - RPe_start_time;

    fprintf(prof_file, "Retainer Profiling: %d, at %f seconds\n",
            retainerGeneration, mut_user_time_during_RP());
#ifdef DEBUG_RETAINER
    fprintf(prof_file, "\tMax C stack size = %u\n", maxCStackSize);
    fprintf(prof_file, "\tMax auxiliary stack size = %u\n", maxStackSize);
#endif
    fprintf(prof_file, "\tAverage number of visits per object = %f\n", averageNumVisit);
}
#endif /* PROFILING */

/* -----------------------------------------------------------------------------
   Called at the beginning of each heap census
   -------------------------------------------------------------------------- */
#ifdef PROFILING
void
stat_startHeapCensus(void)
{
    Ticks user, elapsed;
    getProcessTimes( &user, &elapsed );

    HC_start_time = user;
    HCe_start_time = elapsed;
}
#endif /* PROFILING */

/* -----------------------------------------------------------------------------
   Called at the end of each heap census
   -------------------------------------------------------------------------- */
#ifdef PROFILING
void
stat_endHeapCensus(void)
{
    Ticks user, elapsed;
    getProcessTimes( &user, &elapsed );

    HC_tot_time += user - HC_start_time;
    HCe_tot_time += elapsed - HCe_start_time;
}
#endif /* PROFILING */

/* -----------------------------------------------------------------------------
   Called at the end of execution

   NOTE: number of allocations is not entirely accurate: it doesn't
   take into account the few bytes at the end of the heap that
   were left unused when the heap-check failed.
   -------------------------------------------------------------------------- */

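/* DEBUG-only call counters, reported by the TICK_PRINT macros below (their
 * use in stat_exit() is currently commented out).  Judging by their names:
 * SLOW_CALLS_n counts slow calls made at arity n, RIGHT_ARITY_n those that
 * found a function of exactly that arity, TAGGED_PTR_n those whose function
 * pointer was already tagged, and TOTAL_CALLS counts all calls. */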
#ifdef DEBUG
#define TICK_VAR(arity) \
  extern StgInt SLOW_CALLS_##arity; \
  extern StgInt RIGHT_ARITY_##arity; \
  extern StgInt TAGGED_PTR_##arity;

#define TICK_VAR_INI(arity) \
  StgInt SLOW_CALLS_##arity = 1; \
  StgInt RIGHT_ARITY_##arity = 1; \
  StgInt TAGGED_PTR_##arity = 0;

extern StgInt TOTAL_CALLS;

TICK_VAR(1)
TICK_VAR(2)

TICK_VAR_INI(1)
TICK_VAR_INI(2)

StgInt TOTAL_CALLS=1;
#endif

/* Report the value of a counter */
#define REPORT(counter) \
  { \
    ullong_format_string(counter,temp,rtsTrue/*commas*/); \
    statsPrintf(" (" #counter ") : %s\n",temp); \
  }

/* Report the value of a counter as a percentage of another counter */
#define REPORT_PCT(counter,countertot) \
  statsPrintf(" (" #counter ") %% of (" #countertot ") : %.1f%%\n", \
              counter*100.0/countertot)

#define TICK_PRINT(arity) \
  REPORT(SLOW_CALLS_##arity); \
  REPORT_PCT(RIGHT_ARITY_##arity,SLOW_CALLS_##arity); \
  REPORT_PCT(TAGGED_PTR_##arity,RIGHT_ARITY_##arity); \
  REPORT(RIGHT_ARITY_##arity); \
  REPORT(TAGGED_PTR_##arity)

#define TICK_PRINT_TOT(arity) \
  statsPrintf(" (SLOW_CALLS_" #arity ") %% of (TOTAL_CALLS) : %.1f%%\n", \
              SLOW_CALLS_##arity * 100.0/TOTAL_CALLS)

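/* stat_exit() is called once, at the end of execution.  'alloc' is the
 * allocation (in words) not yet accounted for by stat_endGC(); it is added
 * to GC_tot_alloc before the summary statistics are printed. */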
void
stat_exit(int alloc)
{
    if (RtsFlags.GcFlags.giveStats != NO_GC_STATS) {

        char temp[BIG_STRING_LEN];
        Ticks time;
        Ticks etime;
        nat g, total_collections = 0;

        getProcessTimes( &time, &etime );
        etime -= ElapsedTimeStart;

        GC_tot_alloc += alloc;

        /* Count total garbage collections */
        for (g = 0; g < RtsFlags.GcFlags.generations; g++)
            total_collections += generations[g].collections;

        /* avoid divide by zero if time is measured as 0.00 seconds -- SDM */
        if (time == 0.0) time = 1;
        if (etime == 0.0) etime = 1;

        if (RtsFlags.GcFlags.giveStats >= VERBOSE_GC_STATS) {
            statsPrintf("%9ld %9.9s %9.9s", (lnat)alloc*sizeof(W_), "", "");
            statsPrintf(" %5.2f %5.2f\n\n", 0.0, 0.0);
        }

        if (RtsFlags.GcFlags.giveStats >= SUMMARY_GC_STATS) {
            ullong_format_string(GC_tot_alloc*sizeof(W_),
                                 temp, rtsTrue/*commas*/);
            statsPrintf("%11s bytes allocated in the heap\n", temp);

            ullong_format_string(GC_tot_copied*sizeof(W_),
                                 temp, rtsTrue/*commas*/);
            statsPrintf("%11s bytes copied during GC\n", temp);

            if ( ResidencySamples > 0 ) {
                ullong_format_string(MaxResidency*sizeof(W_),
                                     temp, rtsTrue/*commas*/);
                statsPrintf("%11s bytes maximum residency (%ld sample(s))\n",
                            temp, ResidencySamples);
            }
            statsPrintf("\n");

            /* Print garbage collections in each gen */
            for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
                statsPrintf("%11d collections in generation %d, %6.2fs, %6.2fs elapsed\n",
                            generations[g].collections, g,
                            TICK_TO_DBL(GC_coll_times[g]),
                            TICK_TO_DBL(GC_coll_etimes[g]));
            }

            statsPrintf("\n%11ld MB total memory in use\n\n",
                        mblocks_allocated * MBLOCK_SIZE / (1024 * 1024));

#if defined(THREADED_RTS)
            {
                nat i;
                Task *task;
                for (i = 0, task = all_tasks;
                     task != NULL;
                     i++, task = task->all_link) {
                    statsPrintf("  Task %2d %-8s :  MUT time: %6.2fs  (%6.2fs elapsed)\n"
                                "                     GC time: %6.2fs  (%6.2fs elapsed)\n\n",
                                i,
                                (task->tso == NULL) ? "(worker)" : "(bound)",
                                TICK_TO_DBL(task->mut_time),
                                TICK_TO_DBL(task->mut_etime),
                                TICK_TO_DBL(task->gc_time),
                                TICK_TO_DBL(task->gc_etime));
                }
            }
#endif

            statsPrintf("  INIT  time  %6.2fs  (%6.2fs elapsed)\n",
                        TICK_TO_DBL(InitUserTime), TICK_TO_DBL(InitElapsedTime));
            statsPrintf("  MUT   time  %6.2fs  (%6.2fs elapsed)\n",
                        TICK_TO_DBL(MutUserTime), TICK_TO_DBL(MutElapsedTime));
            statsPrintf("  GC    time  %6.2fs  (%6.2fs elapsed)\n",
                        TICK_TO_DBL(GC_tot_time), TICK_TO_DBL(GCe_tot_time));
#ifdef PROFILING
            statsPrintf("  RP    time  %6.2fs  (%6.2fs elapsed)\n",
                        TICK_TO_DBL(RP_tot_time), TICK_TO_DBL(RPe_tot_time));
            statsPrintf("  PROF  time  %6.2fs  (%6.2fs elapsed)\n",
                        TICK_TO_DBL(HC_tot_time), TICK_TO_DBL(HCe_tot_time));
#endif
            statsPrintf("  EXIT  time  %6.2fs  (%6.2fs elapsed)\n",
                        TICK_TO_DBL(ExitUserTime), TICK_TO_DBL(ExitElapsedTime));
            statsPrintf("  Total time  %6.2fs  (%6.2fs elapsed)\n\n",
                        TICK_TO_DBL(time), TICK_TO_DBL(etime));
            statsPrintf("  %%GC time     %5.1f%%  (%.1f%% elapsed)\n\n",
                        TICK_TO_DBL(GC_tot_time)*100/TICK_TO_DBL(time),
                        TICK_TO_DBL(GCe_tot_time)*100/TICK_TO_DBL(etime));

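            /* Alloc rate below is total bytes allocated divided by the CPU
             * time not spent in GC (or profiling); productivity is the
             * fraction of CPU time spent neither in GC, profiling nor INIT,
             * reported against both total CPU time and total elapsed time. */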
            if (time - GC_tot_time - PROF_VAL(RP_tot_time + HC_tot_time) == 0)
                ullong_format_string(0, temp, rtsTrue/*commas*/);
            else
                ullong_format_string(
                    (ullong)((GC_tot_alloc*sizeof(W_))/
                             TICK_TO_DBL(time - GC_tot_time -
                                         PROF_VAL(RP_tot_time + HC_tot_time))),
                    temp, rtsTrue/*commas*/);

            statsPrintf("  Alloc rate    %s bytes per MUT second\n\n", temp);

            statsPrintf("  Productivity %5.1f%% of total user, %.1f%% of total elapsed\n\n",
                        TICK_TO_DBL(time - GC_tot_time -
                                    PROF_VAL(RP_tot_time + HC_tot_time) - InitUserTime) * 100
                        / TICK_TO_DBL(time),
                        TICK_TO_DBL(time - GC_tot_time -
                                    PROF_VAL(RP_tot_time + HC_tot_time) - InitUserTime) * 100
                        / TICK_TO_DBL(etime));

            /*
            TICK_PRINT(1);
            TICK_PRINT(2);
            REPORT(TOTAL_CALLS);
            TICK_PRINT_TOT(1);
            TICK_PRINT_TOT(2);
            */

#if USE_PAPI
            papi_stats_report();
#endif
        }

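        /* One-line, machine-parsable summary for ONELINE_GC_STATS: a single
         * record delimited by "<<ghc:" and ":ghc>>" giving total allocation,
         * GC count, residency, memory in use and the INIT/MUT/GC times. */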
        if (RtsFlags.GcFlags.giveStats == ONELINE_GC_STATS) {
            /* print the long long separately to avoid bugginess on mingwin (2001-07-02, mingw-0.5) */
            statsPrintf("<<ghc: %llu bytes, ", GC_tot_alloc*(ullong)sizeof(W_));
            statsPrintf("%d GCs, %ld/%ld avg/max bytes residency (%ld samples), %luM in use, %.2f INIT (%.2f elapsed), %.2f MUT (%.2f elapsed), %.2f GC (%.2f elapsed) :ghc>>\n",
                        total_collections,
                        ResidencySamples == 0 ? 0 :
                            AvgResidency*sizeof(W_)/ResidencySamples,
                        MaxResidency*sizeof(W_),
                        ResidencySamples,
                        (unsigned long)(mblocks_allocated * MBLOCK_SIZE / (1024L * 1024L)),
                        TICK_TO_DBL(InitUserTime), TICK_TO_DBL(InitElapsedTime),
                        TICK_TO_DBL(MutUserTime), TICK_TO_DBL(MutElapsedTime),
                        TICK_TO_DBL(GC_tot_time), TICK_TO_DBL(GCe_tot_time));
        }

        statsFlush();
        statsClose();
    }

    if (GC_coll_times)
        stgFree(GC_coll_times);
    GC_coll_times = NULL;
    if (GC_coll_etimes)      /* free the elapsed-time array allocated alongside it */
        stgFree(GC_coll_etimes);
    GC_coll_etimes = NULL;

#if defined(THREADED_RTS) && defined(PROF_SPIN)
    {
        nat g, s;

        statsPrintf("recordMutableGen_sync: %"FMT_Word64"\n", recordMutableGen_sync.spin);
        statsPrintf("gc_alloc_block_sync: %"FMT_Word64"\n", gc_alloc_block_sync.spin);
        statsPrintf("static_objects_sync: %"FMT_Word64"\n", static_objects_sync.spin);
        statsPrintf("whitehole_spin: %"FMT_Word64"\n", whitehole_spin);
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            for (s = 0; s < generations[g].n_steps; s++) {
                statsPrintf("gen[%d].steps[%d].sync_todo: %"FMT_Word64"\n", g, s, generations[g].steps[s].sync_todo.spin);
                statsPrintf("gen[%d].steps[%d].sync_large_objects: %"FMT_Word64"\n", g, s, generations[g].steps[s].sync_large_objects.spin);
            }
        }
    }
#endif
}

/* -----------------------------------------------------------------------------
   stat_describe_gens

   Produce some detailed info on the state of the generational GC.
   -------------------------------------------------------------------------- */
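
/* For each generation, statDescribeGens() walks the mutable list to compute
 * its size in bytes, then for each step reports the block count, the live
 * data (via countOccupied()) and the number of large objects. */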
#ifdef DEBUG
void
statDescribeGens(void)
{
    nat g, s, mut, lge;
    lnat live;
    bdescr *bd;
    step *step;

    debugBelch(
"     Gen    Steps      Max  Mut-list  Step   Blocks     Live    Large\n"
"                    Blocks     Bytes                          Objects\n");

    mut = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
            mut += (bd->free - bd->start) * sizeof(W_);
        }

        debugBelch("%8d %8d %8d %9d", g, generations[g].n_steps,
                   generations[g].max_blocks, mut);

        for (s = 0; s < generations[g].n_steps; s++) {
            step = &generations[g].steps[s];
            live = 0;
            for (bd = step->large_objects, lge = 0; bd; bd = bd->link) {
                lge++;
            }
            // This live figure will be slightly less than the "live" figure
            // given by +RTS -Sstderr, because we don't count the slop at the
            // end of each block.
            live += countOccupied(step->blocks) + countOccupied(step->large_objects);
            if (s != 0) {
                debugBelch("%36s","");
            }
            debugBelch("%6d %8d %8ld %8d\n", s, step->n_blocks,
                       live, lge);
        }
    }
    debugBelch("\n");
}
#endif

/* -----------------------------------------------------------------------------
   Stats available via a programmatic interface, so eg. GHCi can time
   each compilation and expression evaluation.
   -------------------------------------------------------------------------- */

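/* getAllocations(): total number of bytes allocated by the program so far.
 * total_allocated (a word count maintained elsewhere in the RTS) is scaled
 * by the word size here. */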
extern HsInt64 getAllocations( void )
{ return (HsInt64)total_allocated * sizeof(W_); }

/* -----------------------------------------------------------------------------
   Dumping stuff in the stats file, or via the debug message interface
   -------------------------------------------------------------------------- */

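/* statsPrintf() writes to RtsFlags.GcFlags.statsFile when it is set, and
 * otherwise routes the message through vdebugBelch(). */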
void
statsPrintf( char *s, ... )
{
    FILE *sf = RtsFlags.GcFlags.statsFile;
    va_list ap;

    va_start(ap,s);
    if (sf == NULL) {
        vdebugBelch(s,ap);
    } else {
        vfprintf(sf, s, ap);
    }
    va_end(ap);
}

static void
statsFlush( void )
{
    FILE *sf = RtsFlags.GcFlags.statsFile;
    if (sf != NULL) {
        fflush(sf);
    }
}

static void
statsClose( void )
{
    FILE *sf = RtsFlags.GcFlags.statsFile;
    if (sf != NULL) {
        fclose(sf);
    }
}