Documentation for (&&) and (||) states that they are lazy in their second argument...
[ghc.git] / rts / Trace.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 2006-2009
4 *
5 * Debug and performance tracing
6 *
7 * ---------------------------------------------------------------------------*/
8
9 // external headers
10 #include "Rts.h"
11
12 // internal headers
13 #include "Trace.h"
14
15 #if defined(TRACING)
16
17 #include "GetTime.h"
18 #include "GetEnv.h"
19 #include "Stats.h"
20 #include "eventlog/EventLog.h"
21 #include "rts/EventLogWriter.h"
22 #include "Threads.h"
23 #include "Printer.h"
24 #include "RtsFlags.h"
25
26 #if defined(HAVE_UNISTD_H)
27 #include <unistd.h>
28 #endif
29
// events: per-class trace enable flags, set in initTracing() from the
// RTS trace/debug flags. Nonzero means events of that class are emitted.
int TRACE_sched;          // scheduler events
int TRACE_gc;             // GC events
int TRACE_spark_sampled;  // sampled spark events
int TRACE_spark_full;     // full spark event stream
int TRACE_user;           // user events
int TRACE_cap;            // capability events (on if any other class is on)

#if defined(THREADED_RTS)
// serialises stderr trace output across OS threads (see tracePreface callers)
static Mutex trace_utx;
#endif

// true iff output goes to the binary eventlog (TRACE_EVENTLOG and a writer
// was supplied); TRACE_* flags may still be set when this is false.
static bool eventlog_enabled;
43
44 /* ---------------------------------------------------------------------------
45 Starting up / shutting down the tracing facilities
46 --------------------------------------------------------------------------- */
47
48 static const EventLogWriter *getEventLogWriter(void)
49 {
50 return rtsConfig.eventlog_writer;
51 }
52
53 void initTracing (void)
54 {
55 const EventLogWriter *eventlog_writer = getEventLogWriter();
56
57 #if defined(THREADED_RTS)
58 initMutex(&trace_utx);
59 #endif
60
61 // -Ds turns on scheduler tracing too
62 TRACE_sched =
63 RtsFlags.TraceFlags.scheduler ||
64 RtsFlags.DebugFlags.scheduler;
65
66 // -Dg turns on gc tracing too
67 TRACE_gc =
68 RtsFlags.TraceFlags.gc ||
69 RtsFlags.DebugFlags.gc ||
70 RtsFlags.DebugFlags.scheduler;
71 if (TRACE_gc && RtsFlags.GcFlags.giveStats == NO_GC_STATS) {
72 RtsFlags.GcFlags.giveStats = COLLECT_GC_STATS;
73 }
74
75 TRACE_spark_sampled =
76 RtsFlags.TraceFlags.sparks_sampled;
77
78 // -Dr turns on full spark tracing
79 TRACE_spark_full =
80 RtsFlags.TraceFlags.sparks_full ||
81 RtsFlags.DebugFlags.sparks;
82
83 TRACE_user =
84 RtsFlags.TraceFlags.user;
85
86 // We trace cap events if we're tracing anything else
87 TRACE_cap =
88 TRACE_sched ||
89 TRACE_gc ||
90 TRACE_spark_sampled ||
91 TRACE_spark_full ||
92 TRACE_user;
93
94 eventlog_enabled = RtsFlags.TraceFlags.tracing == TRACE_EVENTLOG &&
95 eventlog_writer != NULL;
96
97 /* Note: we can have any of the TRACE_* flags turned on even when
98 eventlog_enabled is off. In the DEBUG way we may be tracing to stderr.
99 */
100
101 if (eventlog_enabled) {
102 initEventLogging(eventlog_writer);
103 }
104 }
105
106 void endTracing (void)
107 {
108 if (eventlog_enabled) {
109 endEventLogging();
110 }
111 }
112
113 void freeTracing (void)
114 {
115 if (eventlog_enabled) {
116 freeEventLogging();
117 }
118 }
119
120 void resetTracing (void)
121 {
122 const EventLogWriter *eventlog_writer;
123 eventlog_writer = getEventLogWriter();
124
125 if (eventlog_enabled) {
126 abortEventLogging(); // abort eventlog inherited from parent
127 if (eventlog_writer != NULL) {
128 initEventLogging(eventlog_writer); // child starts its own eventlog
129 }
130 }
131 }
132
133 void flushTrace (void)
134 {
135 if (eventlog_enabled) {
136 flushEventLog();
137 }
138 }
139
140 void tracingAddCapapilities (uint32_t from, uint32_t to)
141 {
142 if (eventlog_enabled) {
143 moreCapEventBufs(from,to);
144 }
145 }
146
147 /* ---------------------------------------------------------------------------
148 Emitting trace messages/events
149 --------------------------------------------------------------------------- */
150
#if defined(DEBUG)
/* Print the preface that starts every stderr trace line: the OS thread id
   (threaded RTS only) and, when +RTS -v-style timestamps are requested,
   the elapsed time. Callers hold trace_utx. */
static void tracePreface (void)
{
#if defined(THREADED_RTS)
    debugBelch("%12lx: ", (unsigned long)osThreadId());
#endif
    if (RtsFlags.TraceFlags.timestamp) {
        debugBelch("%9" FMT_Word64 ": ", stat_getElapsedTime());
    }
}
#endif
162
#if defined(DEBUG)
/* Human-readable names for thread stop statuses, indexed by the status
 * word of an EVENT_STOP_THREAD event. Indices below 6 are the direct stop
 * codes (HeapOverflow .. THREAD_SUSPENDED_FOREIGN_CALL); indices of 6 and
 * above encode a blocked thread's why_blocked reason offset by 6 (see the
 * matching "6 + ..." comparison in traceSchedEvent_stderr).
 *
 * Declared fully const: every entry is a string literal, so neither the
 * pointers nor the characters may ever be written. */
static const char *const thread_stop_reasons[] = {
    [HeapOverflow] = "heap overflow",
    [StackOverflow] = "stack overflow",
    [ThreadYielding] = "yielding",
    [ThreadBlocked] = "blocked",
    [ThreadFinished] = "finished",
    [THREAD_SUSPENDED_FOREIGN_CALL] = "suspended while making a foreign call",
    [6 + BlockedOnMVar] = "blocked on an MVar",
    [6 + BlockedOnMVarRead] = "blocked on an atomic MVar read",
    [6 + BlockedOnBlackHole] = "blocked on a black hole",
    [6 + BlockedOnRead] = "blocked on a read operation",
    [6 + BlockedOnWrite] = "blocked on a write operation",
    [6 + BlockedOnDelay] = "blocked on a delay operation",
    [6 + BlockedOnSTM] = "blocked on STM",
    [6 + BlockedOnDoProc] = "blocked on asyncDoProc",
    [6 + BlockedOnCCall] = "blocked on a foreign call",
    [6 + BlockedOnCCall_Interruptible] = "blocked on a foreign call (interruptible)",
    [6 + BlockedOnMsgThrowTo] = "blocked on throwTo",
    [6 + ThreadMigrating] = "migrating"
};
#endif
185
#if defined(DEBUG)
/* Render one scheduler event as a human-readable line on stderr.
 * info1/info2 carry per-tag payloads (e.g. the target capability for a
 * migration, or the stop status and black-hole owner for a stop event). */
static void traceSchedEvent_stderr (Capability *cap, EventTypeNum tag,
                                    StgTSO *tso,
                                    StgWord info1 STG_UNUSED,
                                    StgWord info2 STG_UNUSED)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
    case EVENT_CREATE_THREAD:   // (cap, thread)
        debugBelch("cap %d: created thread %" FMT_Word "\n",
                   cap->no, (W_)tso->id);
        break;
    case EVENT_RUN_THREAD:      // (cap, thread)
        debugBelch("cap %d: running thread %" FMT_Word " (%s)\n",
                   cap->no, (W_)tso->id, what_next_strs[tso->what_next]);
        break;
    case EVENT_THREAD_RUNNABLE: // (cap, thread)
        debugBelch("cap %d: thread %" FMT_Word " appended to run queue\n",
                   cap->no, (W_)tso->id);
        break;
    case EVENT_MIGRATE_THREAD:  // (cap, thread, new_cap)
        debugBelch("cap %d: thread %" FMT_Word " migrating to cap %d\n",
                   cap->no, (W_)tso->id, (int)info1);
        break;
    case EVENT_THREAD_WAKEUP:   // (cap, thread, info1_cap)
        debugBelch("cap %d: waking up thread %" FMT_Word " on cap %d\n",
                   cap->no, (W_)tso->id, (int)info1);
        break;

    case EVENT_STOP_THREAD:     // (cap, thread, status)
        if (info1 == 6 + BlockedOnBlackHole) {
            debugBelch("cap %d: thread %" FMT_Word " stopped (blocked on black hole owned by thread %lu)\n",
                       cap->no, (W_)tso->id, (long)info2);
        } else if (info1 == StackOverflow) {
            // Fixed message typo: was "thead", now "thread".
            debugBelch("cap %d: thread %" FMT_Word
                       " stopped (stack overflow, size %lu)\n",
                       cap->no, (W_)tso->id, (long)info2);

        } else {
            debugBelch("cap %d: thread %" FMT_Word " stopped (%s)\n",
                       cap->no, (W_)tso->id, thread_stop_reasons[info1]);
        }
        break;
    default:
        debugBelch("cap %d: thread %" FMT_Word ": event %d\n\n",
                   cap->no, (W_)tso->id, tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif
240
241 void traceSchedEvent_ (Capability *cap, EventTypeNum tag,
242 StgTSO *tso, StgWord info1, StgWord info2)
243 {
244 #if defined(DEBUG)
245 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
246 traceSchedEvent_stderr(cap, tag, tso, info1, info2);
247 } else
248 #endif
249 {
250 postSchedEvent(cap,tag,tso ? tso->id : 0, info1, info2);
251 }
252 }
253
#if defined(DEBUG)
/* Render one (nullary) GC event as a human-readable line on stderr. */
static void traceGcEvent_stderr (Capability *cap, EventTypeNum tag)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();

    /* All GC events carry only the capability, so a format-string table
       suffices; each format consumes cap->no. */
    const char *fmt = NULL;
    switch (tag) {
    case EVENT_REQUEST_SEQ_GC: fmt = "cap %d: requesting sequential GC\n"; break;
    case EVENT_REQUEST_PAR_GC: fmt = "cap %d: requesting parallel GC\n";   break;
    case EVENT_GC_START:       fmt = "cap %d: starting GC\n";              break;
    case EVENT_GC_END:         fmt = "cap %d: finished GC\n";              break;
    case EVENT_GC_IDLE:        fmt = "cap %d: GC idle\n";                  break;
    case EVENT_GC_WORK:        fmt = "cap %d: GC working\n";               break;
    case EVENT_GC_DONE:        fmt = "cap %d: GC done\n";                  break;
    case EVENT_GC_GLOBAL_SYNC: fmt = "cap %d: all caps stopped for GC\n";  break;
    default:
        barf("traceGcEvent: unknown event tag %d", tag);
        break;
    }
    if (fmt != NULL) {
        debugBelch(fmt, cap->no);
    }

    RELEASE_LOCK(&trace_utx);
}
#endif
293
294 void traceGcEvent_ (Capability *cap, EventTypeNum tag)
295 {
296 #if defined(DEBUG)
297 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
298 traceGcEvent_stderr(cap, tag);
299 } else
300 #endif
301 {
302 /* currently all GC events are nullary events */
303 postEvent(cap, tag);
304 }
305 }
306
307 void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag)
308 {
309 #if defined(DEBUG)
310 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
311 traceGcEvent_stderr(cap, tag);
312 } else
313 #endif
314 {
315 /* assuming nullary events and explicitly inserting a timestamp */
316 postEventAtTimestamp(cap, ts, tag);
317 }
318 }
319
320 void traceHeapEvent_ (Capability *cap,
321 EventTypeNum tag,
322 CapsetID heap_capset,
323 W_ info1)
324 {
325 #if defined(DEBUG)
326 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
327 /* no stderr equivalent for these ones */
328 } else
329 #endif
330 {
331 postHeapEvent(cap, tag, heap_capset, info1);
332 }
333 }
334
335 void traceEventHeapInfo_ (CapsetID heap_capset,
336 uint32_t gens,
337 W_ maxHeapSize,
338 W_ allocAreaSize,
339 W_ mblockSize,
340 W_ blockSize)
341 {
342 #if defined(DEBUG)
343 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
344 /* no stderr equivalent for these ones */
345 } else
346 #endif
347 {
348 postEventHeapInfo(heap_capset, gens,
349 maxHeapSize, allocAreaSize,
350 mblockSize, blockSize);
351 }
352 }
353
354 void traceEventGcStats_ (Capability *cap,
355 CapsetID heap_capset,
356 uint32_t gen,
357 W_ copied,
358 W_ slop,
359 W_ fragmentation,
360 uint32_t par_n_threads,
361 W_ par_max_copied,
362 W_ par_tot_copied,
363 W_ par_balanced_copied)
364 {
365 #if defined(DEBUG)
366 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
367 /* no stderr equivalent for these ones */
368 } else
369 #endif
370 {
371 postEventGcStats(cap, heap_capset, gen,
372 copied, slop, fragmentation,
373 par_n_threads, par_max_copied,
374 par_tot_copied, par_balanced_copied);
375 }
376 }
377
378 void traceCapEvent_ (Capability *cap,
379 EventTypeNum tag)
380 {
381 #if defined(DEBUG)
382 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
383 ACQUIRE_LOCK(&trace_utx);
384
385 tracePreface();
386 switch (tag) {
387 case EVENT_CAP_CREATE: // (cap)
388 debugBelch("cap %d: initialised\n", cap->no);
389 break;
390 case EVENT_CAP_DELETE: // (cap)
391 debugBelch("cap %d: shutting down\n", cap->no);
392 break;
393 case EVENT_CAP_ENABLE: // (cap)
394 debugBelch("cap %d: enabling capability\n", cap->no);
395 break;
396 case EVENT_CAP_DISABLE: // (cap)
397 debugBelch("cap %d: disabling capability\n", cap->no);
398 break;
399 }
400 RELEASE_LOCK(&trace_utx);
401 } else
402 #endif
403 {
404 if (eventlog_enabled) {
405 postCapEvent(tag, (EventCapNo)cap->no);
406 }
407 }
408 }
409
410 void traceCapsetEvent_ (EventTypeNum tag,
411 CapsetID capset,
412 StgWord info)
413 {
414 #if defined(DEBUG)
415 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_sched)
416 // When events go to stderr, it is annoying to see the capset
417 // events every time, so we only emit them with -Ds.
418 {
419 ACQUIRE_LOCK(&trace_utx);
420
421 tracePreface();
422 switch (tag) {
423 case EVENT_CAPSET_CREATE: // (capset, capset_type)
424 debugBelch("created capset %" FMT_Word32 " of type %d\n", capset,
425 (int)info);
426 break;
427 case EVENT_CAPSET_DELETE: // (capset)
428 debugBelch("deleted capset %" FMT_Word32 "\n", capset);
429 break;
430 case EVENT_CAPSET_ASSIGN_CAP: // (capset, capno)
431 debugBelch("assigned cap %" FMT_Word " to capset %" FMT_Word32 "\n",
432 info, capset);
433 break;
434 case EVENT_CAPSET_REMOVE_CAP: // (capset, capno)
435 debugBelch("removed cap %" FMT_Word " from capset %" FMT_Word32
436 "\n", info, capset);
437 break;
438 }
439 RELEASE_LOCK(&trace_utx);
440 } else
441 #endif
442 {
443 if (eventlog_enabled) {
444 postCapsetEvent(tag, capset, info);
445 }
446 }
447 }
448
449 void traceWallClockTime_(void) {
450 if (eventlog_enabled) {
451 postWallClockTime(CAPSET_CLOCKDOMAIN_DEFAULT);
452 }
453 }
454
455 void traceOSProcessInfo_(void) {
456 if (eventlog_enabled) {
457 postCapsetEvent(EVENT_OSPROCESS_PID,
458 CAPSET_OSPROCESS_DEFAULT,
459 getpid());
460
461 #if !defined(mingw32_HOST_OS)
462 /* Windows has no strong concept of process hierarchy, so no getppid().
463 * In any case, this trace event is mainly useful for tracing programs
464 * that use 'forkProcess' which Windows doesn't support anyway.
465 */
466 postCapsetEvent(EVENT_OSPROCESS_PPID,
467 CAPSET_OSPROCESS_DEFAULT,
468 getppid());
469 #endif
470 {
471 char buf[256];
472 snprintf(buf, sizeof(buf), "GHC-%s %s", ProjectVersion, RtsWay);
473 postCapsetStrEvent(EVENT_RTS_IDENTIFIER,
474 CAPSET_OSPROCESS_DEFAULT,
475 buf);
476 }
477 {
478 int argc = 0; char **argv;
479 getFullProgArgv(&argc, &argv);
480 if (argc != 0) {
481 postCapsetVecEvent(EVENT_PROGRAM_ARGS,
482 CAPSET_OSPROCESS_DEFAULT,
483 argc, argv);
484 }
485 }
486 }
487 }
488
#if defined(DEBUG)
/* Render one spark event as a human-readable line on stderr. info1 carries
 * the spark thread id (CREATE_SPARK_THREAD) or victim cap (SPARK_STEAL). */
static void traceSparkEvent_stderr (Capability *cap, EventTypeNum tag,
                                    StgWord info1)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
    case EVENT_CREATE_SPARK_THREAD: // (cap, spark_thread)
        debugBelch("cap %d: creating spark thread %lu\n", cap->no, (long)info1);
        break;
    case EVENT_SPARK_CREATE:        // (cap)
        debugBelch("cap %d: added spark to pool\n", cap->no);
        break;
    case EVENT_SPARK_DUD:           // (cap)
        debugBelch("cap %d: discarded dud spark\n", cap->no);
        break;
    case EVENT_SPARK_OVERFLOW:      // (cap)
        debugBelch("cap %d: discarded overflowed spark\n", cap->no);
        break;
    case EVENT_SPARK_RUN:           // (cap)
        debugBelch("cap %d: running a spark\n", cap->no);
        break;
    case EVENT_SPARK_STEAL:         // (cap, victim_cap)
        debugBelch("cap %d: stealing a spark from cap %d\n", cap->no, (int)info1);
        break;
    case EVENT_SPARK_FIZZLE:        // (cap)
        debugBelch("cap %d: fizzled spark removed from pool\n", cap->no);
        break;
    case EVENT_SPARK_GC:            // (cap)
        debugBelch("cap %d: GCd spark removed from pool\n", cap->no);
        break;
    default:
        barf("traceSparkEvent: unknown event tag %d", tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif
538
539 void traceSparkEvent_ (Capability *cap, EventTypeNum tag, StgWord info1)
540 {
541 #if defined(DEBUG)
542 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
543 traceSparkEvent_stderr(cap, tag, info1);
544 } else
545 #endif
546 {
547 postSparkEvent(cap,tag,info1);
548 }
549 }
550
551 void traceSparkCounters_ (Capability *cap,
552 SparkCounters counters,
553 StgWord remaining)
554 {
555 #if defined(DEBUG)
556 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
557 /* we currently don't do debug tracing of spark stats but we must
558 test for TRACE_STDERR because of the !eventlog_enabled case. */
559 } else
560 #endif
561 {
562 postSparkCountersEvent(cap, counters, remaining);
563 }
564 }
565
566 void traceTaskCreate_ (Task *task,
567 Capability *cap)
568 {
569 #if defined(DEBUG)
570 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
571 /* We currently don't do debug tracing of tasks but we must
572 test for TRACE_STDERR because of the !eventlog_enabled case. */
573 } else
574 #endif
575 {
576 EventTaskId taskid = serialisableTaskId(task);
577 EventKernelThreadId tid = kernelThreadId();
578 postTaskCreateEvent(taskid, cap->no, tid);
579 }
580 }
581
582 void traceTaskMigrate_ (Task *task,
583 Capability *cap,
584 Capability *new_cap)
585 {
586 #if defined(DEBUG)
587 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
588 /* We currently don't do debug tracing of tasks but we must
589 test for TRACE_STDERR because of the !eventlog_enabled case. */
590 } else
591 #endif
592 {
593 EventTaskId taskid = serialisableTaskId(task);
594 postTaskMigrateEvent(taskid, cap->no, new_cap->no);
595 }
596 }
597
598 void traceTaskDelete_ (Task *task)
599 {
600 #if defined(DEBUG)
601 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
602 /* We currently don't do debug tracing of tasks but we must
603 test for TRACE_STDERR because of the !eventlog_enabled case. */
604 } else
605 #endif
606 {
607 EventTaskId taskid = serialisableTaskId(task);
608 postTaskDeleteEvent(taskid);
609 }
610 }
611
612 void traceHeapProfBegin(StgWord8 profile_id)
613 {
614 if (eventlog_enabled) {
615 postHeapProfBegin(profile_id);
616 }
617 }
618 void traceHeapBioProfSampleBegin(StgInt era, StgWord64 time)
619 {
620 if (eventlog_enabled) {
621 postHeapBioProfSampleBegin(era, time);
622 }
623 }
624
625 void traceHeapProfSampleBegin(StgInt era)
626 {
627 if (eventlog_enabled) {
628 postHeapProfSampleBegin(era);
629 }
630 }
631
632 void traceHeapProfSampleEnd(StgInt era)
633 {
634 if (eventlog_enabled) {
635 postHeapProfSampleEnd(era);
636 }
637 }
638
639 void traceHeapProfSampleString(StgWord8 profile_id,
640 const char *label, StgWord residency)
641 {
642 if (eventlog_enabled) {
643 postHeapProfSampleString(profile_id, label, residency);
644 }
645 }
646
#if defined(PROFILING)
/* Emit the static metadata for one cost centre to the eventlog. */
void traceHeapProfCostCentre(StgWord32 ccID,
                             const char *label,
                             const char *module,
                             const char *srcloc,
                             StgBool is_caf)
{
    if (!eventlog_enabled) {
        return;
    }
    postHeapProfCostCentre(ccID, label, module, srcloc, is_caf);
}

/* Emit one cost-centre-stack heap-profile sample. */
void traceHeapProfSampleCostCentre(StgWord8 profile_id,
                                   CostCentreStack *stack, StgWord residency)
{
    if (!eventlog_enabled) {
        return;
    }
    postHeapProfSampleCostCentre(profile_id, stack, residency);
}
#endif
667
#if defined(DEBUG)
/* Write a capability-prefixed formatted message to stderr, taking the
 * trace lock around the whole line (va_list form). */
static void vtraceCap_stderr(Capability *cap, char *msg, va_list ap)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    debugBelch("cap %d: ", cap->no);
    vdebugBelch(msg, ap);
    debugBelch("\n");

    RELEASE_LOCK(&trace_utx);
}

/* Variadic convenience wrapper around vtraceCap_stderr. */
static void traceCap_stderr(Capability *cap, char *msg, ...)
{
    va_list args;
    va_start(args, msg);
    vtraceCap_stderr(cap, msg, args);
    va_end(args);
}
#endif
689
690 void traceCap_(Capability *cap, char *msg, ...)
691 {
692 va_list ap;
693 va_start(ap,msg);
694
695 #if defined(DEBUG)
696 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
697 vtraceCap_stderr(cap, msg, ap);
698 } else
699 #endif
700 {
701 postCapMsg(cap, msg, ap);
702 }
703
704 va_end(ap);
705 }
706
#if defined(DEBUG)
/* Write a formatted (uncapped) trace message to stderr, taking the trace
 * lock around the whole line (va_list form). */
static void vtrace_stderr(char *msg, va_list ap)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    vdebugBelch(msg, ap);
    debugBelch("\n");

    RELEASE_LOCK(&trace_utx);
}
#endif
719
/* Emit a printf-style trace message with no capability prefix, to stderr
 * under DEBUG stderr tracing, otherwise to the eventlog. */
void trace_(char *msg, ...)
{
    va_list args;
    va_start(args, msg);

#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        vtrace_stderr(msg, args);
        va_end(args);
        return;
    }
#endif
    postMsg(msg, args);
    va_end(args);
}
736
737 void traceUserMsg(Capability *cap, char *msg)
738 {
739 /* Note: normally we don't check the TRACE_* flags here as they're checked
740 by the wrappers in Trace.h. But traceUserMsg is special since it has no
741 wrapper (it's called from cmm code), so we check TRACE_user here
742 */
743 #if defined(DEBUG)
744 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_user) {
745 // Use "%s" as format string to ignore format specifiers in msg (#3874).
746 traceCap_stderr(cap, "%s", msg);
747 } else
748 #endif
749 {
750 if (eventlog_enabled && TRACE_user) {
751 postUserEvent(cap, EVENT_USER_MSG, msg);
752 }
753 }
754 dtraceUserMsg(cap->no, msg);
755 }
756
757 void traceUserBinaryMsg(Capability *cap, uint8_t *msg, size_t size)
758 {
759 /* Note: normally we don't check the TRACE_* flags here as they're checked
760 by the wrappers in Trace.h. But traceUserMsg is special since it has no
761 wrapper (it's called from cmm code), so we check TRACE_user here
762 */
763 if (eventlog_enabled && TRACE_user) {
764 postUserBinaryEvent(cap, EVENT_USER_BINARY_MSG, msg, size);
765 }
766 }
767
768 void traceUserMarker(Capability *cap, char *markername)
769 {
770 /* Note: traceUserMarker is special since it has no wrapper (it's called
771 from cmm code), so we check eventlog_enabled and TRACE_user here.
772 */
773 #if defined(DEBUG)
774 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_user) {
775 traceCap_stderr(cap, "User marker: %s", markername);
776 } else
777 #endif
778 {
779 if (eventlog_enabled && TRACE_user) {
780 postUserEvent(cap, EVENT_USER_MARKER, markername);
781 }
782 }
783 dtraceUserMarker(cap->no, markername);
784 }
785
786
787 void traceThreadLabel_(Capability *cap,
788 StgTSO *tso,
789 char *label)
790 {
791 #if defined(DEBUG)
792 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
793 ACQUIRE_LOCK(&trace_utx);
794 tracePreface();
795 debugBelch("cap %d: thread %" FMT_Word " has label %s\n",
796 cap->no, (W_)tso->id, label);
797 RELEASE_LOCK(&trace_utx);
798 } else
799 #endif
800 {
801 postThreadLabel(cap, tso->id, label);
802 }
803 }
804
805 void traceThreadStatus_ (StgTSO *tso USED_IF_DEBUG)
806 {
807 #if defined(DEBUG)
808 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
809 printThreadStatus(tso);
810 } else
811 #endif
812 {
813 /* nothing - no event for this one yet */
814 }
815 }
816
#if defined(DEBUG)
/* Start a multi-part stderr trace line: takes the trace lock, prints the
   preface and the formatted prefix. Must be paired with traceEnd(), which
   releases the lock — the lock is deliberately held across the pair. */
void traceBegin (const char *str, ...)
{
    va_list ap;
    va_start(ap,str);

    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    vdebugBelch(str,ap);
    va_end(ap);
}

/* Finish a line started with traceBegin(): print the trailing newline and
   release the trace lock. */
void traceEnd (void)
{
    debugBelch("\n");
    RELEASE_LOCK(&trace_utx);
}
#endif /* DEBUG */
836
837 #endif /* TRACING */
838
839 // If DTRACE is enabled, but neither DEBUG nor TRACING, we need a C land
840 // wrapper for the user-msg probe (as we can't expand that in PrimOps.cmm)
841 //
#if !defined(DEBUG) && !defined(TRACING) && defined(DTRACE)

/* C-land wrapper for the dtrace user-msg probe; needed because the probe
   cannot be expanded directly in PrimOps.cmm (see comment above). */
void dtraceUserMsgWrapper(Capability *cap, char *msg)
{
    dtraceUserMsg(cap->no, msg);
}

/* Likewise for the user-marker probe. */
void dtraceUserMarkerWrapper(Capability *cap, char *msg)
{
    dtraceUserMarker(cap->no, msg);
}

#endif /* !defined(DEBUG) && !defined(TRACING) && defined(DTRACE) */