NonmovingCensus: Emit samples to eventlog
[ghc.git] / rts / Trace.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 2006-2009
4 *
5 * Debug and performance tracing
6 *
7 * ---------------------------------------------------------------------------*/
8
9 // external headers
10 #include "Rts.h"
11
12 // internal headers
13 #include "Trace.h"
14
15 #if defined(TRACING)
16
17 #include "GetTime.h"
18 #include "GetEnv.h"
19 #include "Stats.h"
20 #include "eventlog/EventLog.h"
21 #include "rts/EventLogWriter.h"
22 #include "Threads.h"
23 #include "Printer.h"
24 #include "RtsFlags.h"
25
26 #if defined(HAVE_UNISTD_H)
27 #include <unistd.h>
28 #endif
29
30 // events
31 int TRACE_sched;
32 int TRACE_gc;
33 int TRACE_nonmoving_gc;
34 int TRACE_spark_sampled;
35 int TRACE_spark_full;
36 int TRACE_user;
37 int TRACE_cap;
38
39 #if defined(THREADED_RTS)
40 static Mutex trace_utx;
41 #endif
42
43 static bool eventlog_enabled;
44
45 /* ---------------------------------------------------------------------------
46 Starting up / shutting down the tracing facilities
47 --------------------------------------------------------------------------- */
48
49 static const EventLogWriter *getEventLogWriter(void)
50 {
51 return rtsConfig.eventlog_writer;
52 }
53
/* Initialise the tracing subsystem: derive the TRACE_* event-class flags
 * from the RTS trace (-l) and debug (-D) flags, and start the eventlog
 * when eventlog output was requested and a writer is available. */
void initTracing (void)
{
    const EventLogWriter *eventlog_writer = getEventLogWriter();

#if defined(THREADED_RTS)
    // protects the stderr tracing path against interleaved output
    initMutex(&trace_utx);
#endif

    // -Ds turns on scheduler tracing too
    TRACE_sched =
        RtsFlags.TraceFlags.scheduler ||
        RtsFlags.DebugFlags.scheduler;

    // -Dg turns on gc tracing too
    TRACE_gc =
        RtsFlags.TraceFlags.gc ||
        RtsFlags.DebugFlags.gc ||
        RtsFlags.DebugFlags.scheduler;
    // GC tracing requires GC statistics to actually be collected
    if (TRACE_gc && RtsFlags.GcFlags.giveStats == NO_GC_STATS) {
        RtsFlags.GcFlags.giveStats = COLLECT_GC_STATS;
    }

    TRACE_nonmoving_gc =
        RtsFlags.TraceFlags.nonmoving_gc;

    TRACE_spark_sampled =
        RtsFlags.TraceFlags.sparks_sampled;

    // -Dr turns on full spark tracing
    TRACE_spark_full =
        RtsFlags.TraceFlags.sparks_full ||
        RtsFlags.DebugFlags.sparks;

    TRACE_user =
        RtsFlags.TraceFlags.user;

    // We trace cap events if we're tracing anything else
    TRACE_cap =
        TRACE_sched ||
        TRACE_gc ||
        TRACE_spark_sampled ||
        TRACE_spark_full ||
        TRACE_user;

    eventlog_enabled = RtsFlags.TraceFlags.tracing == TRACE_EVENTLOG &&
                        eventlog_writer != NULL;

    /* Note: we can have any of the TRACE_* flags turned on even when
       eventlog_enabled is off. In the DEBUG way we may be tracing to stderr.
     */

    if (eventlog_enabled) {
        initEventLogging(eventlog_writer);
    }
}
109
110 void endTracing (void)
111 {
112 if (eventlog_enabled) {
113 endEventLogging();
114 }
115 }
116
117 void freeTracing (void)
118 {
119 if (eventlog_enabled) {
120 freeEventLogging();
121 }
122 }
123
124 void resetTracing (void)
125 {
126 const EventLogWriter *eventlog_writer;
127 eventlog_writer = getEventLogWriter();
128
129 if (eventlog_enabled) {
130 abortEventLogging(); // abort eventlog inherited from parent
131 if (eventlog_writer != NULL) {
132 initEventLogging(eventlog_writer); // child starts its own eventlog
133 }
134 }
135 }
136
137 void flushTrace (void)
138 {
139 if (eventlog_enabled) {
140 flushEventLog();
141 }
142 }
143
/* Grow the per-capability event buffers when new capabilities [from..to)
 * come into existence.
 * NOTE(review): the misspelling "Capapilities" is part of the public name
 * and must be preserved for existing callers. */
void tracingAddCapapilities (uint32_t from, uint32_t to)
{
    if (eventlog_enabled) {
        moreCapEventBufs(from,to);
    }
}
150
151 /* ---------------------------------------------------------------------------
152 Emitting trace messages/events
153 --------------------------------------------------------------------------- */
154
#if defined(DEBUG)
/* Print the prefix for a stderr trace line: the OS thread id (threaded
 * RTS only) and, when timestamping is requested via the trace flags, the
 * elapsed time. Caller is expected to hold trace_utx. */
static void tracePreface (void)
{
#if defined(THREADED_RTS)
    debugBelch("%12lx: ", (unsigned long)osThreadId());
#endif
    if (RtsFlags.TraceFlags.timestamp) {
        debugBelch("%9" FMT_Word64 ": ", stat_getElapsedTime());
    }
}
#endif
166
#if defined(DEBUG)
/* Human-readable descriptions for the status word of an EVENT_STOP_THREAD
 * event. Plain stop statuses index directly; when a thread stopped because
 * it blocked, the specific blocking reason is encoded as 6 + BlockedOn*,
 * hence the offset entries below. */
static char *thread_stop_reasons[] = {
    [HeapOverflow] = "heap overflow",
    [StackOverflow] = "stack overflow",
    [ThreadYielding] = "yielding",
    [ThreadBlocked] = "blocked",
    [ThreadFinished] = "finished",
    [THREAD_SUSPENDED_FOREIGN_CALL] = "suspended while making a foreign call",
    [6 + BlockedOnMVar] = "blocked on an MVar",
    [6 + BlockedOnMVarRead] = "blocked on an atomic MVar read",
    [6 + BlockedOnBlackHole] = "blocked on a black hole",
    [6 + BlockedOnRead] = "blocked on a read operation",
    [6 + BlockedOnWrite] = "blocked on a write operation",
    [6 + BlockedOnDelay] = "blocked on a delay operation",
    [6 + BlockedOnSTM] = "blocked on STM",
    [6 + BlockedOnDoProc] = "blocked on asyncDoProc",
    [6 + BlockedOnCCall] = "blocked on a foreign call",
    [6 + BlockedOnCCall_Interruptible] = "blocked on a foreign call (interruptible)",
    [6 + BlockedOnMsgThrowTo] = "blocked on throwTo",
    [6 + ThreadMigrating] = "migrating"
};
#endif
189
#if defined(DEBUG)
/* Debug-mode (stderr) rendering of scheduler events.
 *
 * info1/info2 carry per-event payloads: the target capability number for
 * migrate/wakeup events; for EVENT_STOP_THREAD, info1 is the stop status
 * (see thread_stop_reasons) and info2 the black-hole owner or new stack
 * size, depending on the status. */
static void traceSchedEvent_stderr (Capability *cap, EventTypeNum tag,
                                    StgTSO *tso,
                                    StgWord info1 STG_UNUSED,
                                    StgWord info2 STG_UNUSED)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
    case EVENT_CREATE_THREAD:   // (cap, thread)
        debugBelch("cap %d: created thread %" FMT_Word "\n",
                   cap->no, (W_)tso->id);
        break;
    case EVENT_RUN_THREAD:      //  (cap, thread)
        debugBelch("cap %d: running thread %" FMT_Word " (%s)\n",
                   cap->no, (W_)tso->id, what_next_strs[tso->what_next]);
        break;
    case EVENT_THREAD_RUNNABLE: // (cap, thread)
        debugBelch("cap %d: thread %" FMT_Word " appended to run queue\n",
                   cap->no, (W_)tso->id);
        break;
    case EVENT_MIGRATE_THREAD:  // (cap, thread, new_cap)
        debugBelch("cap %d: thread %" FMT_Word " migrating to cap %d\n",
                   cap->no, (W_)tso->id, (int)info1);
        break;
    case EVENT_THREAD_WAKEUP:   // (cap, thread, info1_cap)
        debugBelch("cap %d: waking up thread %" FMT_Word " on cap %d\n",
                   cap->no, (W_)tso->id, (int)info1);
        break;

    case EVENT_STOP_THREAD:     // (cap, thread, status)
        if (info1 == 6 + BlockedOnBlackHole) {
            debugBelch("cap %d: thread %" FMT_Word " stopped (blocked on black hole owned by thread %lu)\n",
                       cap->no, (W_)tso->id, (long)info2);
        } else if (info1 == StackOverflow) {
            /* fixed typo: "thead" -> "thread" */
            debugBelch("cap %d: thread %" FMT_Word
                       " stopped (stack overflow, size %lu)\n",
                       cap->no, (W_)tso->id, (long)info2);

        } else {
            debugBelch("cap %d: thread %" FMT_Word " stopped (%s)\n",
                       cap->no, (W_)tso->id, thread_stop_reasons[info1]);
        }
        break;
    default:
        debugBelch("cap %d: thread %" FMT_Word ": event %d\n\n",
                   cap->no, (W_)tso->id, tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif
244
245 void traceSchedEvent_ (Capability *cap, EventTypeNum tag,
246 StgTSO *tso, StgWord info1, StgWord info2)
247 {
248 #if defined(DEBUG)
249 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
250 traceSchedEvent_stderr(cap, tag, tso, info1, info2);
251 } else
252 #endif
253 {
254 postSchedEvent(cap,tag,tso ? tso->id : 0, info1, info2);
255 }
256 }
257
#if defined(DEBUG)
/* Debug-mode (stderr) rendering of GC events. All GC events are nullary,
 * so each tag maps to a fixed description. */
static void traceGcEvent_stderr (Capability *cap, EventTypeNum tag)
{
    const char *what;

    switch (tag) {
    case EVENT_REQUEST_SEQ_GC:  what = "requesting sequential GC"; break;
    case EVENT_REQUEST_PAR_GC:  what = "requesting parallel GC";   break;
    case EVENT_GC_START:        what = "starting GC";              break;
    case EVENT_GC_END:          what = "finished GC";              break;
    case EVENT_GC_IDLE:         what = "GC idle";                  break;
    case EVENT_GC_WORK:         what = "GC working";               break;
    case EVENT_GC_DONE:         what = "GC done";                  break;
    case EVENT_GC_GLOBAL_SYNC:  what = "all caps stopped for GC";  break;
    default:
        barf("traceGcEvent: unknown event tag %d", tag);
    }

    ACQUIRE_LOCK(&trace_utx);
    tracePreface();
    debugBelch("cap %d: %s\n", cap->no, what);
    RELEASE_LOCK(&trace_utx);
}
#endif
297
298 void traceGcEvent_ (Capability *cap, EventTypeNum tag)
299 {
300 #if defined(DEBUG)
301 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
302 traceGcEvent_stderr(cap, tag);
303 } else
304 #endif
305 {
306 /* currently all GC events are nullary events */
307 postEvent(cap, tag);
308 }
309 }
310
311 void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag)
312 {
313 #if defined(DEBUG)
314 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
315 traceGcEvent_stderr(cap, tag);
316 } else
317 #endif
318 {
319 /* assuming nullary events and explicitly inserting a timestamp */
320 postEventAtTimestamp(cap, ts, tag);
321 }
322 }
323
324 void traceHeapEvent_ (Capability *cap,
325 EventTypeNum tag,
326 CapsetID heap_capset,
327 W_ info1)
328 {
329 #if defined(DEBUG)
330 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
331 /* no stderr equivalent for these ones */
332 } else
333 #endif
334 {
335 postHeapEvent(cap, tag, heap_capset, info1);
336 }
337 }
338
339 void traceEventHeapInfo_ (CapsetID heap_capset,
340 uint32_t gens,
341 W_ maxHeapSize,
342 W_ allocAreaSize,
343 W_ mblockSize,
344 W_ blockSize)
345 {
346 #if defined(DEBUG)
347 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
348 /* no stderr equivalent for these ones */
349 } else
350 #endif
351 {
352 postEventHeapInfo(heap_capset, gens,
353 maxHeapSize, allocAreaSize,
354 mblockSize, blockSize);
355 }
356 }
357
358 void traceEventGcStats_ (Capability *cap,
359 CapsetID heap_capset,
360 uint32_t gen,
361 W_ copied,
362 W_ slop,
363 W_ fragmentation,
364 uint32_t par_n_threads,
365 W_ par_max_copied,
366 W_ par_tot_copied,
367 W_ par_balanced_copied)
368 {
369 #if defined(DEBUG)
370 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
371 /* no stderr equivalent for these ones */
372 } else
373 #endif
374 {
375 postEventGcStats(cap, heap_capset, gen,
376 copied, slop, fragmentation,
377 par_n_threads, par_max_copied,
378 par_tot_copied, par_balanced_copied);
379 }
380 }
381
382 void traceCapEvent_ (Capability *cap,
383 EventTypeNum tag)
384 {
385 #if defined(DEBUG)
386 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
387 ACQUIRE_LOCK(&trace_utx);
388
389 tracePreface();
390 switch (tag) {
391 case EVENT_CAP_CREATE: // (cap)
392 debugBelch("cap %d: initialised\n", cap->no);
393 break;
394 case EVENT_CAP_DELETE: // (cap)
395 debugBelch("cap %d: shutting down\n", cap->no);
396 break;
397 case EVENT_CAP_ENABLE: // (cap)
398 debugBelch("cap %d: enabling capability\n", cap->no);
399 break;
400 case EVENT_CAP_DISABLE: // (cap)
401 debugBelch("cap %d: disabling capability\n", cap->no);
402 break;
403 }
404 RELEASE_LOCK(&trace_utx);
405 } else
406 #endif
407 {
408 if (eventlog_enabled) {
409 postCapEvent(tag, (EventCapNo)cap->no);
410 }
411 }
412 }
413
/* Trace a capability-set event (create/delete/assign/remove).
 * info carries the capset type for CREATE and the capability number for
 * ASSIGN/REMOVE. */
void traceCapsetEvent_ (EventTypeNum tag,
                        CapsetID capset,
                        StgWord info)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_sched)
        // When events go to stderr, it is annoying to see the capset
        // events every time, so we only emit them with -Ds.
    {
        ACQUIRE_LOCK(&trace_utx);

        tracePreface();
        switch (tag) {
        case EVENT_CAPSET_CREATE:   // (capset, capset_type)
            debugBelch("created capset %" FMT_Word32 " of type %d\n", capset,
                       (int)info);
            break;
        case EVENT_CAPSET_DELETE:   // (capset)
            debugBelch("deleted capset %" FMT_Word32 "\n", capset);
            break;
        case EVENT_CAPSET_ASSIGN_CAP:  // (capset, capno)
            debugBelch("assigned cap %" FMT_Word " to capset %" FMT_Word32 "\n",
                       info, capset);
            break;
        case EVENT_CAPSET_REMOVE_CAP:  // (capset, capno)
            debugBelch("removed cap %" FMT_Word " from capset %" FMT_Word32
                       "\n", info, capset);
            break;
        }
        RELEASE_LOCK(&trace_utx);
    } else
#endif
    {
        if (eventlog_enabled) {
            postCapsetEvent(tag, capset, info);
        }
    }
}
452
453 void traceWallClockTime_(void) {
454 if (eventlog_enabled) {
455 postWallClockTime(CAPSET_CLOCKDOMAIN_DEFAULT);
456 }
457 }
458
/* Emit OS-level process identification to the eventlog: pid, parent pid
 * (POSIX only), an RTS identifier string, and the full program argv. */
void traceOSProcessInfo_(void) {
    if (eventlog_enabled) {
        postCapsetEvent(EVENT_OSPROCESS_PID,
                        CAPSET_OSPROCESS_DEFAULT,
                        getpid());

#if !defined (mingw32_HOST_OS)
/* Windows has no strong concept of process hierarchy, so no getppid().
 * In any case, this trace event is mainly useful for tracing programs
 * that use 'forkProcess' which Windows doesn't support anyway.
 */
        postCapsetEvent(EVENT_OSPROCESS_PPID,
                        CAPSET_OSPROCESS_DEFAULT,
                        getppid());
#endif
        {
            // e.g. "GHC-9.x.y rts_way" — identifies the RTS build
            char buf[256];
            snprintf(buf, sizeof(buf), "GHC-%s %s", ProjectVersion, RtsWay);
            postCapsetStrEvent(EVENT_RTS_IDENTIFIER,
                               CAPSET_OSPROCESS_DEFAULT,
                               buf);
        }
        {
            // program arguments, as seen by the RTS
            int argc = 0; char **argv;
            getFullProgArgv(&argc, &argv);
            if (argc != 0) {
                postCapsetVecEvent(EVENT_PROGRAM_ARGS,
                                   CAPSET_OSPROCESS_DEFAULT,
                                   argc, argv);
            }
        }
    }
}
492
#if defined(DEBUG)
/* Debug-mode (stderr) rendering of spark events. info1 is only used by
 * CREATE_SPARK_THREAD (the spawned thread id) and SPARK_STEAL (the victim
 * capability number); all other spark events are nullary. */
static void traceSparkEvent_stderr (Capability *cap, EventTypeNum tag,
                                    StgWord info1)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {

    case EVENT_CREATE_SPARK_THREAD: // (cap, spark_thread)
        debugBelch("cap %d: creating spark thread %lu\n",
                   cap->no, (long)info1);
        break;
    case EVENT_SPARK_CREATE:        // (cap)
        debugBelch("cap %d: added spark to pool\n",
                   cap->no);
        break;
    case EVENT_SPARK_DUD:           //  (cap)
        debugBelch("cap %d: discarded dud spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_OVERFLOW:      // (cap)
        debugBelch("cap %d: discarded overflowed spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_RUN:           // (cap)
        debugBelch("cap %d: running a spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_STEAL:         // (cap, victim_cap)
        debugBelch("cap %d: stealing a spark from cap %d\n",
                   cap->no, (int)info1);
        break;
    case EVENT_SPARK_FIZZLE:        // (cap)
        debugBelch("cap %d: fizzled spark removed from pool\n",
                   cap->no);
        break;
    case EVENT_SPARK_GC:            // (cap)
        debugBelch("cap %d: GCd spark removed from pool\n",
                   cap->no);
        break;
    default:
        barf("traceSparkEvent: unknown event tag %d", tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif
542
543 void traceSparkEvent_ (Capability *cap, EventTypeNum tag, StgWord info1)
544 {
545 #if defined(DEBUG)
546 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
547 traceSparkEvent_stderr(cap, tag, info1);
548 } else
549 #endif
550 {
551 postSparkEvent(cap,tag,info1);
552 }
553 }
554
555 void traceSparkCounters_ (Capability *cap,
556 SparkCounters counters,
557 StgWord remaining)
558 {
559 #if defined(DEBUG)
560 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
561 /* we currently don't do debug tracing of spark stats but we must
562 test for TRACE_STDERR because of the !eventlog_enabled case. */
563 } else
564 #endif
565 {
566 postSparkCountersEvent(cap, counters, remaining);
567 }
568 }
569
570 void traceTaskCreate_ (Task *task,
571 Capability *cap)
572 {
573 #if defined(DEBUG)
574 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
575 /* We currently don't do debug tracing of tasks but we must
576 test for TRACE_STDERR because of the !eventlog_enabled case. */
577 } else
578 #endif
579 {
580 EventTaskId taskid = serialisableTaskId(task);
581 EventKernelThreadId tid = kernelThreadId();
582 postTaskCreateEvent(taskid, cap->no, tid);
583 }
584 }
585
586 void traceTaskMigrate_ (Task *task,
587 Capability *cap,
588 Capability *new_cap)
589 {
590 #if defined(DEBUG)
591 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
592 /* We currently don't do debug tracing of tasks but we must
593 test for TRACE_STDERR because of the !eventlog_enabled case. */
594 } else
595 #endif
596 {
597 EventTaskId taskid = serialisableTaskId(task);
598 postTaskMigrateEvent(taskid, cap->no, new_cap->no);
599 }
600 }
601
602 void traceTaskDelete_ (Task *task)
603 {
604 #if defined(DEBUG)
605 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
606 /* We currently don't do debug tracing of tasks but we must
607 test for TRACE_STDERR because of the !eventlog_enabled case. */
608 } else
609 #endif
610 {
611 EventTaskId taskid = serialisableTaskId(task);
612 postTaskDeleteEvent(taskid);
613 }
614 }
615
616 void traceHeapProfBegin(StgWord8 profile_id)
617 {
618 if (eventlog_enabled) {
619 postHeapProfBegin(profile_id);
620 }
621 }
622
623 void traceHeapProfSampleBegin(StgInt era)
624 {
625 if (eventlog_enabled) {
626 postHeapProfSampleBegin(era);
627 }
628 }
629
630 void traceHeapProfSampleString(StgWord8 profile_id,
631 const char *label, StgWord residency)
632 {
633 if (eventlog_enabled) {
634 postHeapProfSampleString(profile_id, label, residency);
635 }
636 }
637
#if defined(PROFILING)
/* Cost-centre variants of the heap-profiling wrappers (profiled RTS
 * only): forward to the eventlog when enabled. */
void traceHeapProfCostCentre(StgWord32 ccID,
                             const char *label,
                             const char *module,
                             const char *srcloc,
                             StgBool is_caf)
{
    if (!eventlog_enabled) {
        return;
    }
    postHeapProfCostCentre(ccID, label, module, srcloc, is_caf);
}

void traceHeapProfSampleCostCentre(StgWord8 profile_id,
                                   CostCentreStack *stack, StgWord residency)
{
    if (!eventlog_enabled) {
        return;
    }
    postHeapProfSampleCostCentre(profile_id, stack, residency);
}
#endif
658
#if defined(DEBUG)
/* Print a capability-tagged free-form message to stderr (va_list form).
 * Takes the trace lock around the whole line to keep output atomic. */
static void vtraceCap_stderr(Capability *cap, char *msg, va_list args)
{
    ACQUIRE_LOCK(&trace_utx);
    tracePreface();
    debugBelch("cap %d: ", cap->no);
    vdebugBelch(msg, args);
    debugBelch("\n");
    RELEASE_LOCK(&trace_utx);
}

/* Variadic convenience wrapper around vtraceCap_stderr. */
static void traceCap_stderr(Capability *cap, char *msg, ...)
{
    va_list args;
    va_start(args, msg);
    vtraceCap_stderr(cap, msg, args);
    va_end(args);
}
#endif
680
681 void traceCap_(Capability *cap, char *msg, ...)
682 {
683 va_list ap;
684 va_start(ap,msg);
685
686 #if defined(DEBUG)
687 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
688 vtraceCap_stderr(cap, msg, ap);
689 } else
690 #endif
691 {
692 postCapMsg(cap, msg, ap);
693 }
694
695 va_end(ap);
696 }
697
#if defined(DEBUG)
/* Print a free-form trace message to stderr (va_list form); the trace
 * lock keeps the line atomic against other tracing threads. */
static void vtrace_stderr(char *msg, va_list args)
{
    ACQUIRE_LOCK(&trace_utx);
    tracePreface();
    vdebugBelch(msg, args);
    debugBelch("\n");
    RELEASE_LOCK(&trace_utx);
}
#endif
710
/* Emit an uncategorised formatted trace message, to stderr in
 * DEBUG+stderr mode, otherwise to the eventlog. */
void trace_(char *msg, ...)
{
    va_list args;
    va_start(args, msg);
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        vtrace_stderr(msg, args);
    } else
#endif
    {
        postMsg(msg, args);
    }
    va_end(args);
}
727
/* Emit a user (traceEvent#) message, plus the corresponding dtrace probe. */
void traceUserMsg(Capability *cap, char *msg)
{
    /* Note: normally we don't check the TRACE_* flags here as they're checked
       by the wrappers in Trace.h. But traceUserMsg is special since it has no
       wrapper (it's called from cmm code), so we check TRACE_user here
     */
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_user) {
        // Use "%s" as format string to ignore format specifiers in msg (#3874).
        traceCap_stderr(cap, "%s", msg);
    } else
#endif
    {
        if (eventlog_enabled && TRACE_user) {
            postUserEvent(cap, EVENT_USER_MSG, msg);
        }
    }
    // the dtrace probe fires regardless of the eventlog/stderr path
    dtraceUserMsg(cap->no, msg);
}
747
/* Emit a binary user message of `size` bytes to the eventlog. */
void traceUserBinaryMsg(Capability *cap, uint8_t *msg, size_t size)
{
    /* Note: normally we don't check the TRACE_* flags here as they're checked
       by the wrappers in Trace.h. But traceUserBinaryMsg is special since it
       has no wrapper (it's called from cmm code), so we check TRACE_user here
     */
    if (eventlog_enabled && TRACE_user) {
        postUserBinaryEvent(cap, EVENT_USER_BINARY_MSG, msg, size);
    }
}
758
759 void traceUserMarker(Capability *cap, char *markername)
760 {
761 /* Note: traceUserMarker is special since it has no wrapper (it's called
762 from cmm code), so we check eventlog_enabled and TRACE_user here.
763 */
764 #if defined(DEBUG)
765 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_user) {
766 traceCap_stderr(cap, "User marker: %s", markername);
767 } else
768 #endif
769 {
770 if (eventlog_enabled && TRACE_user) {
771 postUserEvent(cap, EVENT_USER_MARKER, markername);
772 }
773 }
774 dtraceUserMarker(cap->no, markername);
775 }
776
777
778 void traceThreadLabel_(Capability *cap,
779 StgTSO *tso,
780 char *label)
781 {
782 #if defined(DEBUG)
783 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
784 ACQUIRE_LOCK(&trace_utx);
785 tracePreface();
786 debugBelch("cap %d: thread %" FMT_Word " has label %s\n",
787 cap->no, (W_)tso->id, label);
788 RELEASE_LOCK(&trace_utx);
789 } else
790 #endif
791 {
792 postThreadLabel(cap, tso->id, label);
793 }
794 }
795
796 void traceConcMarkBegin()
797 {
798 if (eventlog_enabled)
799 postEventNoCap(EVENT_CONC_MARK_BEGIN);
800 }
801
802 void traceConcMarkEnd(StgWord32 marked_obj_count)
803 {
804 if (eventlog_enabled)
805 postConcMarkEnd(marked_obj_count);
806 }
807
808 void traceConcSyncBegin()
809 {
810 if (eventlog_enabled)
811 postEventNoCap(EVENT_CONC_SYNC_BEGIN);
812 }
813
814 void traceConcSyncEnd()
815 {
816 if (eventlog_enabled)
817 postEventNoCap(EVENT_CONC_SYNC_END);
818 }
819
820 void traceConcSweepBegin()
821 {
822 if (eventlog_enabled)
823 postEventNoCap(EVENT_CONC_SWEEP_BEGIN);
824 }
825
826 void traceConcSweepEnd()
827 {
828 if (eventlog_enabled)
829 postEventNoCap(EVENT_CONC_SWEEP_END);
830 }
831
832 void traceConcUpdRemSetFlush(Capability *cap)
833 {
834 if (eventlog_enabled)
835 postConcUpdRemSetFlush(cap);
836 }
837
838 void traceNonmovingHeapCensus(uint32_t log_blk_size,
839 const struct NonmovingAllocCensus *census)
840 {
841 if (eventlog_enabled && TRACE_nonmoving_gc)
842 postNonmovingHeapCensus(log_blk_size, census);
843 }
844
845 void traceThreadStatus_ (StgTSO *tso USED_IF_DEBUG)
846 {
847 #if defined(DEBUG)
848 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
849 printThreadStatus(tso);
850 } else
851 #endif
852 {
853 /* nothing - no event for this one yet */
854 }
855 }
856
#if defined(DEBUG)
/* traceBegin/traceEnd bracket a multi-part stderr trace line: traceBegin
 * acquires the trace lock and prints the preface plus the formatted
 * prefix, and the lock is deliberately held until the matching traceEnd
 * prints the terminating newline and releases it. Callers must pair the
 * two on every path. */
void traceBegin (const char *str, ...)
{
    va_list ap;
    va_start(ap,str);

    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    vdebugBelch(str,ap);
    va_end(ap);
}

/* Finish a trace line started with traceBegin; releases the trace lock. */
void traceEnd (void)
{
    debugBelch("\n");
    RELEASE_LOCK(&trace_utx);
}
#endif /* DEBUG */
876
877 #endif /* TRACING */
878
879 // If DTRACE is enabled, but neither DEBUG nor TRACING, we need a C land
880 // wrapper for the user-msg probe (as we can't expand that in PrimOps.cmm)
881 //
#if !defined(DEBUG) && !defined(TRACING) && defined(DTRACE)

/* C-land shim for the user-msg dtrace probe: PrimOps.cmm cannot expand
 * the probe macro itself, so it calls this wrapper instead. */
void dtraceUserMsgWrapper(Capability *cap, char *msg)
{
    dtraceUserMsg(cap->no, msg);
}

/* C-land shim for the user-marker dtrace probe (same reason as above). */
void dtraceUserMarkerWrapper(Capability *cap, char *msg)
{
    dtraceUserMarker(cap->no, msg);
}

#endif /* !defined(DEBUG) && !defined(TRACING) && defined(DTRACE) */