/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 2006-2009
 *
 * Debug and performance tracing
 *
 * ---------------------------------------------------------------------------*/

#include "eventlog/EventLog.h"
#include "rts/EventLogWriter.h"

#if defined(HAVE_UNISTD_H)
#include <unistd.h>
#endif

int TRACE_sched;
int TRACE_gc;
int TRACE_nonmoving_gc;
int TRACE_spark_sampled;
int TRACE_spark_full;
int TRACE_user;
int TRACE_cap;

#if defined(THREADED_RTS)
static Mutex trace_utx;
#endif
/* ---------------------------------------------------------------------------
   Starting up / shutting down the tracing facilities
   --------------------------------------------------------------------------- */

void initTracing (void)
{
#if defined(THREADED_RTS)
    initMutex(&trace_utx);
#endif
    // -Ds turns on scheduler tracing too
    TRACE_sched =
        RtsFlags.TraceFlags.scheduler ||
        RtsFlags.DebugFlags.scheduler;

    // -Dg turns on gc tracing too
    TRACE_gc =
        RtsFlags.TraceFlags.gc ||
        RtsFlags.DebugFlags.gc ||
        RtsFlags.DebugFlags.scheduler;
    if (TRACE_gc && RtsFlags.GcFlags.giveStats == NO_GC_STATS) {
        RtsFlags.GcFlags.giveStats = COLLECT_GC_STATS;
    }

    TRACE_nonmoving_gc =
        RtsFlags.TraceFlags.nonmoving_gc;

    TRACE_spark_sampled =
        RtsFlags.TraceFlags.sparks_sampled;

    // -Dr turns on full spark tracing
    TRACE_spark_full =
        RtsFlags.TraceFlags.sparks_full ||
        RtsFlags.DebugFlags.sparks;

    TRACE_user =
        RtsFlags.TraceFlags.user;

    // We trace cap events if we're tracing anything else
    TRACE_cap =
        TRACE_sched ||
        TRACE_gc ||
        TRACE_spark_sampled ||
        TRACE_spark_full ||
        TRACE_user;

    /* Note: we can have any of the TRACE_* flags turned on even when
       eventlog_enabled is off. In the DEBUG way we may be tracing to stderr.
     */

    if (RtsFlags.TraceFlags.tracing == TRACE_EVENTLOG
        && rtsConfig.eventlog_writer != NULL) {
        startEventLogging(rtsConfig.eventlog_writer);
    }
}
void endTracing (void)
{
    if (eventlog_enabled) {
        endEventLogging();
    }
}

void freeTracing (void)
{
    if (eventlog_enabled) {
        freeEventLogging();
    }
}

// Used to reset tracing in a forked child
void resetTracing (void)
{
    restartEventLogging();
}

void flushTrace (void)
{
    if (eventlog_enabled) {
        flushEventLog(NULL);
    }
}

void tracingAddCapapilities (uint32_t from, uint32_t to)
{
    if (eventlog_enabled) {
        moreCapEventBufs(from,to);
    }
}
/* ---------------------------------------------------------------------------
   Emitting trace messages/events
   --------------------------------------------------------------------------- */

static void tracePreface (void)
{
#if defined(THREADED_RTS)
    debugBelch("%12lx: ", (unsigned long)osThreadId());
#endif
    if (RtsFlags.TraceFlags.timestamp) {
        debugBelch("%9" FMT_Word64 ": ", stat_getElapsedTime());
    }
}
static char *thread_stop_reasons[] = {
    [HeapOverflow] = "heap overflow",
    [StackOverflow] = "stack overflow",
    [ThreadYielding] = "yielding",
    [ThreadBlocked] = "blocked",
    [ThreadFinished] = "finished",
    [THREAD_SUSPENDED_FOREIGN_CALL] = "suspended while making a foreign call",
    [6 + BlockedOnMVar] = "blocked on an MVar",
    [6 + BlockedOnMVarRead] = "blocked on an atomic MVar read",
    [6 + BlockedOnBlackHole] = "blocked on a black hole",
    [6 + BlockedOnRead] = "blocked on a read operation",
    [6 + BlockedOnWrite] = "blocked on a write operation",
    [6 + BlockedOnDelay] = "blocked on a delay operation",
    [6 + BlockedOnSTM] = "blocked on STM",
    [6 + BlockedOnDoProc] = "blocked on asyncDoProc",
    [6 + BlockedOnCCall] = "blocked on a foreign call",
    [6 + BlockedOnCCall_Interruptible] = "blocked on a foreign call (interruptible)",
    [6 + BlockedOnMsgThrowTo] = "blocked on throwTo",
    [6 + ThreadMigrating] = "migrating"
};
static void traceSchedEvent_stderr (Capability *cap, EventTypeNum tag,
                                    StgTSO *tso,
                                    StgWord info1 STG_UNUSED,
                                    StgWord info2 STG_UNUSED)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
    case EVENT_CREATE_THREAD:   // (cap, thread)
        debugBelch("cap %d: created thread %" FMT_Word "\n",
                   cap->no, (W_)tso->id);
        break;
    case EVENT_RUN_THREAD:      // (cap, thread)
        debugBelch("cap %d: running thread %" FMT_Word " (%s)\n",
                   cap->no, (W_)tso->id, what_next_strs[tso->what_next]);
        break;
    case EVENT_THREAD_RUNNABLE: // (cap, thread)
        debugBelch("cap %d: thread %" FMT_Word " appended to run queue\n",
                   cap->no, (W_)tso->id);
        break;
    case EVENT_MIGRATE_THREAD:  // (cap, thread, new_cap)
        debugBelch("cap %d: thread %" FMT_Word " migrating to cap %d\n",
                   cap->no, (W_)tso->id, (int)info1);
        break;
    case EVENT_THREAD_WAKEUP:   // (cap, thread, info1_cap)
        debugBelch("cap %d: waking up thread %" FMT_Word " on cap %d\n",
                   cap->no, (W_)tso->id, (int)info1);
        break;
    case EVENT_STOP_THREAD:     // (cap, thread, status)
        if (info1 == 6 + BlockedOnBlackHole) {
            debugBelch("cap %d: thread %" FMT_Word " stopped (blocked on black hole owned by thread %lu)\n",
                       cap->no, (W_)tso->id, (long)info2);
        } else if (info1 == StackOverflow) {
            debugBelch("cap %d: thread %" FMT_Word
                       " stopped (stack overflow, size %lu)\n",
                       cap->no, (W_)tso->id, (long)info2);
        } else {
            debugBelch("cap %d: thread %" FMT_Word " stopped (%s)\n",
                       cap->no, (W_)tso->id, thread_stop_reasons[info1]);
        }
        break;
    default:
        debugBelch("cap %d: thread %" FMT_Word ": event %d\n\n",
                   cap->no, (W_)tso->id, tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
void traceSchedEvent_ (Capability *cap, EventTypeNum tag,
                       StgTSO *tso, StgWord info1, StgWord info2)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        traceSchedEvent_stderr(cap, tag, tso, info1, info2);
    } else {
        postSchedEvent(cap, tag, tso ? tso->id : 0, info1, info2);
    }
}
static void traceGcEvent_stderr (Capability *cap, EventTypeNum tag)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
    case EVENT_REQUEST_SEQ_GC:  // (cap)
        debugBelch("cap %d: requesting sequential GC\n", cap->no);
        break;
    case EVENT_REQUEST_PAR_GC:  // (cap)
        debugBelch("cap %d: requesting parallel GC\n", cap->no);
        break;
    case EVENT_GC_START:        // (cap)
        debugBelch("cap %d: starting GC\n", cap->no);
        break;
    case EVENT_GC_END:          // (cap)
        debugBelch("cap %d: finished GC\n", cap->no);
        break;
    case EVENT_GC_IDLE:         // (cap)
        debugBelch("cap %d: GC idle\n", cap->no);
        break;
    case EVENT_GC_WORK:         // (cap)
        debugBelch("cap %d: GC working\n", cap->no);
        break;
    case EVENT_GC_DONE:         // (cap)
        debugBelch("cap %d: GC done\n", cap->no);
        break;
    case EVENT_GC_GLOBAL_SYNC:  // (cap)
        debugBelch("cap %d: all caps stopped for GC\n", cap->no);
        break;
    default:
        barf("traceGcEvent: unknown event tag %d", tag);
    }

    RELEASE_LOCK(&trace_utx);
}
void traceGcEvent_ (Capability *cap, EventTypeNum tag)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        traceGcEvent_stderr(cap, tag);
    } else {
        /* currently all GC events are nullary events */
        postEvent(cap, tag);
    }
}

void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        traceGcEvent_stderr(cap, tag);
    } else {
        /* assuming nullary events and explicitly inserting a timestamp */
        postEventAtTimestamp(cap, ts, tag);
    }
}
void traceHeapEvent_ (Capability *cap,
                      EventTypeNum tag,
                      CapsetID heap_capset,
                      W_ info1)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* no stderr equivalent for these ones */
    } else {
        postHeapEvent(cap, tag, heap_capset, info1);
    }
}

void traceEventHeapInfo_ (CapsetID heap_capset,
                          uint32_t gens,
                          W_ maxHeapSize, W_ allocAreaSize,
                          W_ mblockSize, W_ blockSize)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* no stderr equivalent for these ones */
    } else {
        postEventHeapInfo(heap_capset, gens,
                          maxHeapSize, allocAreaSize,
                          mblockSize, blockSize);
    }
}
void traceEventGcStats_ (Capability *cap,
                         CapsetID heap_capset,
                         uint32_t gen,
                         W_ copied,
                         W_ slop,
                         W_ fragmentation,
                         uint32_t par_n_threads,
                         W_ par_max_copied,
                         W_ par_tot_copied,
                         W_ par_balanced_copied)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* no stderr equivalent for these ones */
    } else {
        postEventGcStats(cap, heap_capset, gen,
                         copied, slop, fragmentation,
                         par_n_threads, par_max_copied,
                         par_tot_copied, par_balanced_copied);
    }
}
void traceCapEvent_ (Capability *cap,
                     EventTypeNum tag)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        ACQUIRE_LOCK(&trace_utx);
        tracePreface();
        switch (tag) {
        case EVENT_CAP_CREATE:   // (cap)
            debugBelch("cap %d: initialised\n", cap->no);
            break;
        case EVENT_CAP_DELETE:   // (cap)
            debugBelch("cap %d: shutting down\n", cap->no);
            break;
        case EVENT_CAP_ENABLE:   // (cap)
            debugBelch("cap %d: enabling capability\n", cap->no);
            break;
        case EVENT_CAP_DISABLE:  // (cap)
            debugBelch("cap %d: disabling capability\n", cap->no);
            break;
        }
        RELEASE_LOCK(&trace_utx);
    } else {
        if (eventlog_enabled) {
            postCapEvent(tag, (EventCapNo)cap->no);
        }
    }
}
void traceCapsetEvent_ (EventTypeNum tag,
                        CapsetID capset,
                        StgWord info)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_sched)
        // When events go to stderr, it is annoying to see the capset
        // events every time, so we only emit them with -Ds.
    {
        ACQUIRE_LOCK(&trace_utx);
        tracePreface();
        switch (tag) {
        case EVENT_CAPSET_CREATE:   // (capset, capset_type)
            debugBelch("created capset %" FMT_Word32 " of type %d\n", capset,
                       (int)info);
            break;
        case EVENT_CAPSET_DELETE:   // (capset)
            debugBelch("deleted capset %" FMT_Word32 "\n", capset);
            break;
        case EVENT_CAPSET_ASSIGN_CAP:  // (capset, capno)
            debugBelch("assigned cap %" FMT_Word " to capset %" FMT_Word32 "\n",
                       info, capset);
            break;
        case EVENT_CAPSET_REMOVE_CAP:  // (capset, capno)
            debugBelch("removed cap %" FMT_Word " from capset %" FMT_Word32
                       "\n", info, capset);
            break;
        }
        RELEASE_LOCK(&trace_utx);
    } else {
        if (eventlog_enabled) {
            postCapsetEvent(tag, capset, info);
        }
    }
}
void traceWallClockTime_(void) {
    if (eventlog_enabled) {
        postWallClockTime(CAPSET_CLOCKDOMAIN_DEFAULT);
    }
}

void traceOSProcessInfo_(void) {
    if (eventlog_enabled) {
        postCapsetEvent(EVENT_OSPROCESS_PID,
                        CAPSET_OSPROCESS_DEFAULT,
                        getpid());

#if !defined(mingw32_HOST_OS)
        /* Windows has no strong concept of process hierarchy, so no getppid().
         * In any case, this trace event is mainly useful for tracing programs
         * that use 'forkProcess', which Windows doesn't support anyway.
         */
        postCapsetEvent(EVENT_OSPROCESS_PPID,
                        CAPSET_OSPROCESS_DEFAULT,
                        getppid());
#endif
        {
            char buf[256];
            snprintf(buf, sizeof(buf), "GHC-%s %s", ProjectVersion, RtsWay);
            postCapsetStrEvent(EVENT_RTS_IDENTIFIER,
                               CAPSET_OSPROCESS_DEFAULT,
                               buf);
        }
        {
            int argc = 0; char **argv;
            getFullProgArgv(&argc, &argv);
            if (argc != 0) {
                postCapsetVecEvent(EVENT_PROGRAM_ARGS,
                                   CAPSET_OSPROCESS_DEFAULT,
                                   argc, argv);
            }
        }
    }
}
static void traceSparkEvent_stderr (Capability *cap, EventTypeNum tag,
                                    StgWord info1)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
    case EVENT_CREATE_SPARK_THREAD:   // (cap, spark_thread)
        debugBelch("cap %d: creating spark thread %lu\n",
                   cap->no, (long)info1);
        break;
    case EVENT_SPARK_CREATE:          // (cap)
        debugBelch("cap %d: added spark to pool\n",
                   cap->no);
        break;
    case EVENT_SPARK_DUD:             // (cap)
        debugBelch("cap %d: discarded dud spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_OVERFLOW:        // (cap)
        debugBelch("cap %d: discarded overflowed spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_RUN:             // (cap)
        debugBelch("cap %d: running a spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_STEAL:           // (cap, victim_cap)
        debugBelch("cap %d: stealing a spark from cap %d\n",
                   cap->no, (int)info1);
        break;
    case EVENT_SPARK_FIZZLE:          // (cap)
        debugBelch("cap %d: fizzled spark removed from pool\n",
                   cap->no);
        break;
    case EVENT_SPARK_GC:              // (cap)
        debugBelch("cap %d: GCd spark removed from pool\n",
                   cap->no);
        break;
    default:
        barf("traceSparkEvent: unknown event tag %d", tag);
    }

    RELEASE_LOCK(&trace_utx);
}
void traceSparkEvent_ (Capability *cap, EventTypeNum tag, StgWord info1)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        traceSparkEvent_stderr(cap, tag, info1);
    } else {
        postSparkEvent(cap, tag, info1);
    }
}

void traceSparkCounters_ (Capability *cap,
                          SparkCounters counters,
                          StgWord remaining)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* we currently don't do debug tracing of spark stats but we must
           test for TRACE_STDERR because of the !eventlog_enabled case. */
    } else {
        postSparkCountersEvent(cap, counters, remaining);
    }
}
void traceTaskCreate_ (Task       *task,
                       Capability *cap)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* We currently don't do debug tracing of tasks but we must
           test for TRACE_STDERR because of the !eventlog_enabled case. */
    } else {
        EventTaskId taskid = serialisableTaskId(task);
        EventKernelThreadId tid = kernelThreadId();
        postTaskCreateEvent(taskid, cap->no, tid);
    }
}

void traceTaskMigrate_ (Task       *task,
                        Capability *cap,
                        Capability *new_cap)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* We currently don't do debug tracing of tasks but we must
           test for TRACE_STDERR because of the !eventlog_enabled case. */
    } else {
        EventTaskId taskid = serialisableTaskId(task);
        postTaskMigrateEvent(taskid, cap->no, new_cap->no);
    }
}

void traceTaskDelete_ (Task *task)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* We currently don't do debug tracing of tasks but we must
           test for TRACE_STDERR because of the !eventlog_enabled case. */
    } else {
        EventTaskId taskid = serialisableTaskId(task);
        postTaskDeleteEvent(taskid);
    }
}
void traceHeapProfBegin(StgWord8 profile_id)
{
    if (eventlog_enabled) {
        postHeapProfBegin(profile_id);
    }
}

void traceHeapBioProfSampleBegin(StgInt era, StgWord64 time)
{
    if (eventlog_enabled) {
        postHeapBioProfSampleBegin(era, time);
    }
}

void traceHeapProfSampleBegin(StgInt era)
{
    if (eventlog_enabled) {
        postHeapProfSampleBegin(era);
    }
}

void traceHeapProfSampleEnd(StgInt era)
{
    if (eventlog_enabled) {
        postHeapProfSampleEnd(era);
    }
}

void traceHeapProfSampleString(StgWord8 profile_id,
                               const char *label, StgWord residency)
{
    if (eventlog_enabled) {
        postHeapProfSampleString(profile_id, label, residency);
    }
}
#if defined(PROFILING)
void traceHeapProfCostCentre(StgWord32 ccID,
                             const char *label,
                             const char *module,
                             const char *srcloc,
                             StgBool is_caf)
{
    if (eventlog_enabled) {
        postHeapProfCostCentre(ccID, label, module, srcloc, is_caf);
    }
}

// This one is for .hp samples
void traceHeapProfSampleCostCentre(StgWord8 profile_id,
                                   CostCentreStack *stack, StgWord residency)
{
    if (eventlog_enabled) {
        postHeapProfSampleCostCentre(profile_id, stack, residency);
    }
}

// This one is for .prof samples
void traceProfSampleCostCentre(Capability *cap,
                               CostCentreStack *stack, StgWord tick)
{
    if (eventlog_enabled) {
        postProfSampleCostCentre(cap, stack, tick);
    }
}

void traceProfBegin(void)
{
    if (eventlog_enabled) {
        postProfBegin();
    }
}
#endif
static void vtraceCap_stderr(Capability *cap, char *msg, va_list ap)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    debugBelch("cap %d: ", cap->no);
    vdebugBelch(msg, ap);
    debugBelch("\n");

    RELEASE_LOCK(&trace_utx);
}

static void traceCap_stderr(Capability *cap, char *msg, ...)
{
    va_list ap;
    va_start(ap, msg);
    vtraceCap_stderr(cap, msg, ap);
    va_end(ap);
}

void traceCap_(Capability *cap, char *msg, ...)
{
    va_list ap;
    va_start(ap, msg);

    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        vtraceCap_stderr(cap, msg, ap);
    } else {
        postCapMsg(cap, msg, ap);
    }

    va_end(ap);
}
static void vtrace_stderr(char *msg, va_list ap)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    vdebugBelch(msg, ap);
    debugBelch("\n");

    RELEASE_LOCK(&trace_utx);
}

void trace_(char *msg, ...)
{
    va_list ap;
    va_start(ap, msg);

    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        vtrace_stderr(msg, ap);
    } else {
        postMsg(msg, ap);
    }

    va_end(ap);
}
void traceUserMsg(Capability *cap, char *msg)
{
    /* Note: normally we don't check the TRACE_* flags here as they're checked
       by the wrappers in Trace.h. But traceUserMsg is special since it has no
       wrapper (it's called from cmm code), so we check TRACE_user here.
     */
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_user) {
        // Use "%s" as format string to ignore format specifiers in msg (#3874).
        traceCap_stderr(cap, "%s", msg);
    } else {
        if (eventlog_enabled && TRACE_user) {
            postUserEvent(cap, EVENT_USER_MSG, msg);
        }
    }
    dtraceUserMsg(cap->no, msg);
}
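
/* For contrast, a hedged sketch of the Trace.h wrappers mentioned above
 * (reconstructed from memory of Trace.h, not copied from it): each wrapper
 * checks the corresponding TRACE_* flag before calling the worker function,
 * which is exactly the check traceUserMsg has to perform itself:
 *
 *     #define traceSchedEvent(cap, tag, tso, other)       \
 *         if (RTS_UNLIKELY(TRACE_sched)) {                \
 *             traceSchedEvent_(cap, tag, tso, other, 0);  \
 *         }
 */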
void traceUserBinaryMsg(Capability *cap, uint8_t *msg, size_t size)
{
    /* Note: normally we don't check the TRACE_* flags here as they're checked
       by the wrappers in Trace.h. But traceUserBinaryMsg is special since it
       has no wrapper (it's called from cmm code), so we check TRACE_user here.
     */
    if (eventlog_enabled && TRACE_user) {
        postUserBinaryEvent(cap, EVENT_USER_BINARY_MSG, msg, size);
    }
}
void traceUserMarker(Capability *cap, char *markername)
{
    /* Note: traceUserMarker is special since it has no wrapper (it's called
       from cmm code), so we check eventlog_enabled and TRACE_user here.
     */
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_user) {
        traceCap_stderr(cap, "User marker: %s", markername);
    } else {
        if (eventlog_enabled && TRACE_user) {
            postUserEvent(cap, EVENT_USER_MARKER, markername);
        }
    }
    dtraceUserMarker(cap->no, markername);
}
void traceThreadLabel_(Capability *cap,
                       StgTSO     *tso,
                       char       *label)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        ACQUIRE_LOCK(&trace_utx);
        tracePreface();
        debugBelch("cap %d: thread %" FMT_Word " has label %s\n",
                   cap->no, (W_)tso->id, label);
        RELEASE_LOCK(&trace_utx);
    } else {
        postThreadLabel(cap, tso->id, label);
    }
}
void traceConcMarkBegin()
{
    if (eventlog_enabled)
        postEventNoCap(EVENT_CONC_MARK_BEGIN);
}

void traceConcMarkEnd(StgWord32 marked_obj_count)
{
    if (eventlog_enabled)
        postConcMarkEnd(marked_obj_count);
}

void traceConcSyncBegin()
{
    if (eventlog_enabled)
        postEventNoCap(EVENT_CONC_SYNC_BEGIN);
}

void traceConcSyncEnd()
{
    if (eventlog_enabled)
        postEventNoCap(EVENT_CONC_SYNC_END);
}

void traceConcSweepBegin()
{
    if (eventlog_enabled)
        postEventNoCap(EVENT_CONC_SWEEP_BEGIN);
}

void traceConcSweepEnd()
{
    if (eventlog_enabled)
        postEventNoCap(EVENT_CONC_SWEEP_END);
}

void traceConcUpdRemSetFlush(Capability *cap)
{
    if (eventlog_enabled)
        postConcUpdRemSetFlush(cap);
}

void traceNonmovingHeapCensus(uint32_t log_blk_size,
                              const struct NonmovingAllocCensus *census)
{
    if (eventlog_enabled && TRACE_nonmoving_gc)
        postNonmovingHeapCensus(log_blk_size, census);
}
void traceThreadStatus_ (StgTSO *tso USED_IF_DEBUG)
{
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        printThreadStatus(tso);
    } else {
        /* nothing - no event for this one yet */
    }
}

void traceBegin (const char *str, ...)
{
    va_list ap;
    va_start(ap, str);

    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    vdebugBelch(str, ap);
    va_end(ap);
}

void traceEnd (void)
{
    debugBelch("\n");
    RELEASE_LOCK(&trace_utx);
}
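
/* Hedged usage sketch (the call sites are assumed, not in this file): debug
 * code that wants one coherent line of output can bracket several debugBelch
 * calls between traceBegin and traceEnd, e.g. roughly
 *
 *     traceBegin("cap %d: run queue:", cap->no);
 *     // ... debugBelch each thread id on the queue ...
 *     traceEnd();
 *
 * traceBegin holds trace_utx until traceEnd releases it, so the pieces are
 * not interleaved with trace output from other OS threads.
 */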
// If DTRACE is enabled, but neither DEBUG nor TRACING, we need a C land
// wrapper for the user-msg probe (as we can't expand that in PrimOps.cmm)
#if !defined(DEBUG) && !defined(TRACING) && defined(DTRACE)

void dtraceUserMsgWrapper(Capability *cap, char *msg)
{
    dtraceUserMsg(cap->no, msg);
}

void dtraceUserMarkerWrapper(Capability *cap, char *msg)
{
    dtraceUserMarker(cap->no, msg);
}

#endif /* !defined(DEBUG) && !defined(TRACING) && defined(DTRACE) */