Remove explicit recursion in retainer profiling (fixes #14758)
[ghc.git] / rts / Trace.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 2006-2009
4 *
5 * Debug and performance tracing
6 *
7 * ---------------------------------------------------------------------------*/
8
9 // external headers
10 #include "Rts.h"
11
12 // internal headers
13 #include "Trace.h"
14
15 #if defined(TRACING)
16
17 #include "GetTime.h"
18 #include "GetEnv.h"
19 #include "Stats.h"
20 #include "eventlog/EventLog.h"
21 #include "rts/EventLogWriter.h"
22 #include "Threads.h"
23 #include "Printer.h"
24 #include "RtsFlags.h"
25
26 #if defined(HAVE_UNISTD_H)
27 #include <unistd.h>
28 #endif
29
30 // events
31 int TRACE_sched;
32 int TRACE_gc;
33 int TRACE_spark_sampled;
34 int TRACE_spark_full;
35 int TRACE_user;
36 int TRACE_cap;
37
38 #if defined(THREADED_RTS)
39 static Mutex trace_utx;
40 #endif
41
42 static bool eventlog_enabled;
43
44 /* ---------------------------------------------------------------------------
45 Starting up / shutting down the tracing facilities
46 --------------------------------------------------------------------------- */
47
48 static const EventLogWriter *getEventLogWriter(void)
49 {
50 return rtsConfig.eventlog_writer;
51 }
52
/* Initialise the tracing facility: derive the TRACE_* event-class flags
 * from the +RTS -l (TraceFlags) and -D (DebugFlags) options, and start
 * eventlog output if it is enabled and a writer is configured.
 * Called once during RTS startup. */
void initTracing (void)
{
    const EventLogWriter *eventlog_writer = getEventLogWriter();

#if defined(THREADED_RTS)
    // protects interleaving of stderr trace output from multiple threads
    initMutex(&trace_utx);
#endif

    // -Ds turns on scheduler tracing too
    TRACE_sched =
        RtsFlags.TraceFlags.scheduler ||
        RtsFlags.DebugFlags.scheduler;

    // -Dg turns on gc tracing too
    TRACE_gc =
        RtsFlags.TraceFlags.gc ||
        RtsFlags.DebugFlags.gc ||
        RtsFlags.DebugFlags.scheduler;
    // GC tracing needs per-GC stats to be collected
    if (TRACE_gc && RtsFlags.GcFlags.giveStats == NO_GC_STATS) {
        RtsFlags.GcFlags.giveStats = COLLECT_GC_STATS;
    }

    TRACE_spark_sampled =
        RtsFlags.TraceFlags.sparks_sampled;

    // -Dr turns on full spark tracing
    TRACE_spark_full =
        RtsFlags.TraceFlags.sparks_full ||
        RtsFlags.DebugFlags.sparks;

    TRACE_user =
        RtsFlags.TraceFlags.user;

    // We trace cap events if we're tracing anything else
    TRACE_cap =
        TRACE_sched ||
        TRACE_gc ||
        TRACE_spark_sampled ||
        TRACE_spark_full ||
        TRACE_user;

    eventlog_enabled = RtsFlags.TraceFlags.tracing == TRACE_EVENTLOG &&
                        eventlog_writer != NULL;

    /* Note: we can have any of the TRACE_* flags turned on even when
       eventlog_enabled is off. In the DEBUG way we may be tracing to stderr.
     */

    if (eventlog_enabled) {
        initEventLogging(eventlog_writer);
    }
}
105
106 void endTracing (void)
107 {
108 if (eventlog_enabled) {
109 endEventLogging();
110 }
111 }
112
113 void freeTracing (void)
114 {
115 if (eventlog_enabled) {
116 freeEventLogging();
117 }
118 }
119
120 void resetTracing (void)
121 {
122 const EventLogWriter *eventlog_writer;
123 eventlog_writer = getEventLogWriter();
124
125 if (eventlog_enabled) {
126 abortEventLogging(); // abort eventlog inherited from parent
127 if (eventlog_writer != NULL) {
128 initEventLogging(eventlog_writer); // child starts its own eventlog
129 }
130 }
131 }
132
133 void flushTrace (void)
134 {
135 if (eventlog_enabled) {
136 flushEventLog();
137 }
138 }
139
140 void tracingAddCapapilities (uint32_t from, uint32_t to)
141 {
142 if (eventlog_enabled) {
143 moreCapEventBufs(from,to);
144 }
145 }
146
147 /* ---------------------------------------------------------------------------
148 Emitting trace messages/events
149 --------------------------------------------------------------------------- */
150
#if defined(DEBUG)
/* Print the prefix shared by all stderr trace lines: the OS thread id
 * (threaded RTS only) and, when +RTS -vt is given, an elapsed-time stamp.
 * Caller must hold trace_utx so the line is not interleaved. */
static void tracePreface (void)
{
#if defined(THREADED_RTS)
    debugBelch("%12lx: ", (unsigned long)osThreadId());
#endif
    if (RtsFlags.TraceFlags.timestamp) {
        debugBelch("%9" FMT_Word64 ": ", stat_getElapsedTime());
    }
}
#endif
162
#if defined(DEBUG)
/* Human-readable names for EVENT_STOP_THREAD status values.
 * Indices below 6 are direct stop statuses (heap/stack overflow, etc.);
 * blocking reasons are encoded as 6 + the why_blocked value, matching
 * the encoding used when the stop event is emitted. */
static char *thread_stop_reasons[] = {
    [HeapOverflow] = "heap overflow",
    [StackOverflow] = "stack overflow",
    [ThreadYielding] = "yielding",
    [ThreadBlocked] = "blocked",
    [ThreadFinished] = "finished",
    [THREAD_SUSPENDED_FOREIGN_CALL] = "suspended while making a foreign call",
    [6 + BlockedOnMVar] = "blocked on an MVar",
    [6 + BlockedOnMVarRead] = "blocked on an atomic MVar read",
    [6 + BlockedOnBlackHole] = "blocked on a black hole",
    [6 + BlockedOnRead] = "blocked on a read operation",
    [6 + BlockedOnWrite] = "blocked on a write operation",
    [6 + BlockedOnDelay] = "blocked on a delay operation",
    [6 + BlockedOnSTM] = "blocked on STM",
    [6 + BlockedOnDoProc] = "blocked on asyncDoProc",
    [6 + BlockedOnCCall] = "blocked on a foreign call",
    [6 + BlockedOnCCall_Interruptible] = "blocked on a foreign call (interruptible)",
    [6 + BlockedOnMsgThrowTo] = "blocked on throwTo",
    [6 + ThreadMigrating] = "migrating"
};
#endif
185
#if defined(DEBUG)
/* Render a scheduler event as a human-readable line on stderr (DEBUG
 * way, TRACE_STDERR mode). The meaning of info1/info2 depends on the
 * event tag; see the per-case comments. Output is serialised by
 * trace_utx so lines from different threads do not interleave.
 *
 * Fix: the StackOverflow message previously read "thead" instead of
 * "thread". */
static void traceSchedEvent_stderr (Capability *cap, EventTypeNum tag,
                                    StgTSO *tso,
                                    StgWord info1 STG_UNUSED,
                                    StgWord info2 STG_UNUSED)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
    case EVENT_CREATE_THREAD:   // (cap, thread)
        debugBelch("cap %d: created thread %" FMT_Word "\n",
                   cap->no, (W_)tso->id);
        break;
    case EVENT_RUN_THREAD:      //  (cap, thread)
        debugBelch("cap %d: running thread %" FMT_Word " (%s)\n",
                   cap->no, (W_)tso->id, what_next_strs[tso->what_next]);
        break;
    case EVENT_THREAD_RUNNABLE: // (cap, thread)
        debugBelch("cap %d: thread %" FMT_Word " appended to run queue\n",
                   cap->no, (W_)tso->id);
        break;
    case EVENT_MIGRATE_THREAD:  // (cap, thread, new_cap)
        debugBelch("cap %d: thread %" FMT_Word " migrating to cap %d\n",
                   cap->no, (W_)tso->id, (int)info1);
        break;
    case EVENT_THREAD_WAKEUP:   // (cap, thread, info1_cap)
        debugBelch("cap %d: waking up thread %" FMT_Word " on cap %d\n",
                   cap->no, (W_)tso->id, (int)info1);
        break;

    case EVENT_STOP_THREAD:     // (cap, thread, status)
        if (info1 == 6 + BlockedOnBlackHole) {
            debugBelch("cap %d: thread %" FMT_Word " stopped (blocked on black hole owned by thread %lu)\n",
                       cap->no, (W_)tso->id, (long)info2);
        } else if (info1 == StackOverflow) {
            debugBelch("cap %d: thread %" FMT_Word
                       " stopped (stack overflow, size %lu)\n",
                       cap->no, (W_)tso->id, (long)info2);

        } else {
            debugBelch("cap %d: thread %" FMT_Word " stopped (%s)\n",
                       cap->no, (W_)tso->id, thread_stop_reasons[info1]);
        }
        break;
    default:
        debugBelch("cap %d: thread %" FMT_Word ": event %d\n\n",
                   cap->no, (W_)tso->id, tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif
240
241 void traceSchedEvent_ (Capability *cap, EventTypeNum tag,
242 StgTSO *tso, StgWord info1, StgWord info2)
243 {
244 #if defined(DEBUG)
245 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
246 traceSchedEvent_stderr(cap, tag, tso, info1, info2);
247 } else
248 #endif
249 {
250 postSchedEvent(cap,tag,tso ? tso->id : 0, info1, info2);
251 }
252 }
253
#if defined(DEBUG)
/* Render a (nullary) GC event as a human-readable line on stderr.
 * Serialised by trace_utx; barfs on unknown tags since the set of GC
 * events is closed. */
static void traceGcEvent_stderr (Capability *cap, EventTypeNum tag)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
      case EVENT_REQUEST_SEQ_GC:  // (cap)
          debugBelch("cap %d: requesting sequential GC\n", cap->no);
          break;
      case EVENT_REQUEST_PAR_GC:  // (cap)
          debugBelch("cap %d: requesting parallel GC\n", cap->no);
          break;
      case EVENT_GC_START:        // (cap)
          debugBelch("cap %d: starting GC\n", cap->no);
          break;
      case EVENT_GC_END:          // (cap)
          debugBelch("cap %d: finished GC\n", cap->no);
          break;
      case EVENT_GC_IDLE:         // (cap)
          debugBelch("cap %d: GC idle\n", cap->no);
          break;
      case EVENT_GC_WORK:         // (cap)
          debugBelch("cap %d: GC working\n", cap->no);
          break;
      case EVENT_GC_DONE:         // (cap)
          debugBelch("cap %d: GC done\n", cap->no);
          break;
      case EVENT_GC_GLOBAL_SYNC:  // (cap)
          debugBelch("cap %d: all caps stopped for GC\n", cap->no);
          break;
      default:
          barf("traceGcEvent: unknown event tag %d", tag);
          break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif
293
294 void traceGcEvent_ (Capability *cap, EventTypeNum tag)
295 {
296 #if defined(DEBUG)
297 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
298 traceGcEvent_stderr(cap, tag);
299 } else
300 #endif
301 {
302 /* currently all GC events are nullary events */
303 postEvent(cap, tag);
304 }
305 }
306
307 void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag)
308 {
309 #if defined(DEBUG)
310 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
311 traceGcEvent_stderr(cap, tag);
312 } else
313 #endif
314 {
315 /* assuming nullary events and explicitly inserting a timestamp */
316 postEventAtTimestamp(cap, ts, tag);
317 }
318 }
319
320 void traceHeapEvent_ (Capability *cap,
321 EventTypeNum tag,
322 CapsetID heap_capset,
323 W_ info1)
324 {
325 #if defined(DEBUG)
326 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
327 /* no stderr equivalent for these ones */
328 } else
329 #endif
330 {
331 postHeapEvent(cap, tag, heap_capset, info1);
332 }
333 }
334
335 void traceEventHeapInfo_ (CapsetID heap_capset,
336 uint32_t gens,
337 W_ maxHeapSize,
338 W_ allocAreaSize,
339 W_ mblockSize,
340 W_ blockSize)
341 {
342 #if defined(DEBUG)
343 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
344 /* no stderr equivalent for these ones */
345 } else
346 #endif
347 {
348 postEventHeapInfo(heap_capset, gens,
349 maxHeapSize, allocAreaSize,
350 mblockSize, blockSize);
351 }
352 }
353
354 void traceEventGcStats_ (Capability *cap,
355 CapsetID heap_capset,
356 uint32_t gen,
357 W_ copied,
358 W_ slop,
359 W_ fragmentation,
360 uint32_t par_n_threads,
361 W_ par_max_copied,
362 W_ par_tot_copied,
363 W_ par_balanced_copied)
364 {
365 #if defined(DEBUG)
366 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
367 /* no stderr equivalent for these ones */
368 } else
369 #endif
370 {
371 postEventGcStats(cap, heap_capset, gen,
372 copied, slop, fragmentation,
373 par_n_threads, par_max_copied,
374 par_tot_copied, par_balanced_copied);
375 }
376 }
377
/* Emit a capability lifecycle event (create/delete/enable/disable).
 * In DEBUG/TRACE_STDERR mode an unknown tag is silently ignored (the
 * switch has no default); in eventlog mode the raw tag is posted. */
void traceCapEvent_ (Capability   *cap,
                     EventTypeNum  tag)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        ACQUIRE_LOCK(&trace_utx);

        tracePreface();
        switch (tag) {
        case EVENT_CAP_CREATE:   // (cap)
            debugBelch("cap %d: initialised\n", cap->no);
            break;
        case EVENT_CAP_DELETE:   // (cap)
            debugBelch("cap %d: shutting down\n", cap->no);
            break;
        case EVENT_CAP_ENABLE:   // (cap)
            debugBelch("cap %d: enabling capability\n", cap->no);
            break;
        case EVENT_CAP_DISABLE:  // (cap)
            debugBelch("cap %d: disabling capability\n", cap->no);
            break;
        }
        RELEASE_LOCK(&trace_utx);
    } else
#endif
    {
        if (eventlog_enabled) {
            postCapEvent(tag, (EventCapNo)cap->no);
        }
    }
}
409
/* Emit a capset (capability set) event. The meaning of 'info' depends
 * on the tag: the capset type for CREATE, a capability number for
 * ASSIGN/REMOVE, unused for DELETE. */
void traceCapsetEvent_ (EventTypeNum tag,
                        CapsetID capset,
                        StgWord info)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_sched)
        // When events go to stderr, it is annoying to see the capset
        // events every time, so we only emit them with -Ds.
    {
        ACQUIRE_LOCK(&trace_utx);

        tracePreface();
        switch (tag) {
        case EVENT_CAPSET_CREATE:   // (capset, capset_type)
            debugBelch("created capset %" FMT_Word32 " of type %d\n", capset,
                       (int)info);
            break;
        case EVENT_CAPSET_DELETE:   // (capset)
            debugBelch("deleted capset %" FMT_Word32 "\n", capset);
            break;
        case EVENT_CAPSET_ASSIGN_CAP:  // (capset, capno)
            debugBelch("assigned cap %" FMT_Word " to capset %" FMT_Word32 "\n",
                       info, capset);
            break;
        case EVENT_CAPSET_REMOVE_CAP:  // (capset, capno)
            debugBelch("removed cap %" FMT_Word " from capset %" FMT_Word32
                       "\n", info, capset);
            break;
        }
        RELEASE_LOCK(&trace_utx);
    } else
#endif
    {
        if (eventlog_enabled) {
            postCapsetEvent(tag, capset, info);
        }
    }
}
448
449 void traceWallClockTime_(void) {
450 if (eventlog_enabled) {
451 postWallClockTime(CAPSET_CLOCKDOMAIN_DEFAULT);
452 }
453 }
454
/* Emit process-identification events into the eventlog: pid, parent pid
 * (non-Windows only), the RTS identifier string, and the program's
 * argument vector. Eventlog only; no-op otherwise. */
void traceOSProcessInfo_(void) {
    if (eventlog_enabled) {
        postCapsetEvent(EVENT_OSPROCESS_PID,
                        CAPSET_OSPROCESS_DEFAULT,
                        getpid());

#if !defined (mingw32_HOST_OS)
/* Windows has no strong concept of process hierarchy, so no getppid().
 * In any case, this trace event is mainly useful for tracing programs
 * that use 'forkProcess' which Windows doesn't support anyway.
 */
        postCapsetEvent(EVENT_OSPROCESS_PPID,
                        CAPSET_OSPROCESS_DEFAULT,
                        getppid());
#endif
        {
            // e.g. "GHC-8.4.1 rts_thr" — identifies the RTS flavour
            char buf[256];
            snprintf(buf, sizeof(buf), "GHC-%s %s", ProjectVersion, RtsWay);
            postCapsetStrEvent(EVENT_RTS_IDENTIFIER,
                               CAPSET_OSPROCESS_DEFAULT,
                               buf);
        }
        {
            int argc = 0; char **argv;
            getFullProgArgv(&argc, &argv);
            if (argc != 0) {
                postCapsetVecEvent(EVENT_PROGRAM_ARGS,
                                   CAPSET_OSPROCESS_DEFAULT,
                                   argc, argv);
            }
        }
    }
}
488
#if defined(DEBUG)
/* Render a spark event as a human-readable line on stderr. info1 is the
 * spark thread id for CREATE_SPARK_THREAD and the victim capability for
 * SPARK_STEAL; it is unused for the other tags. Serialised by trace_utx;
 * barfs on unknown tags. */
static void traceSparkEvent_stderr (Capability *cap, EventTypeNum tag,
                                    StgWord info1)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {

    case EVENT_CREATE_SPARK_THREAD: // (cap, spark_thread)
        debugBelch("cap %d: creating spark thread %lu\n",
                   cap->no, (long)info1);
        break;
    case EVENT_SPARK_CREATE:        // (cap)
        debugBelch("cap %d: added spark to pool\n",
                   cap->no);
        break;
    case EVENT_SPARK_DUD:           //  (cap)
        debugBelch("cap %d: discarded dud spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_OVERFLOW:      // (cap)
        debugBelch("cap %d: discarded overflowed spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_RUN:           // (cap)
        debugBelch("cap %d: running a spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_STEAL:         // (cap, victim_cap)
        debugBelch("cap %d: stealing a spark from cap %d\n",
                   cap->no, (int)info1);
        break;
    case EVENT_SPARK_FIZZLE:        // (cap)
        debugBelch("cap %d: fizzled spark removed from pool\n",
                   cap->no);
        break;
    case EVENT_SPARK_GC:            // (cap)
        debugBelch("cap %d: GCd spark removed from pool\n",
                   cap->no);
        break;
    default:
        barf("traceSparkEvent: unknown event tag %d", tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif
538
539 void traceSparkEvent_ (Capability *cap, EventTypeNum tag, StgWord info1)
540 {
541 #if defined(DEBUG)
542 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
543 traceSparkEvent_stderr(cap, tag, info1);
544 } else
545 #endif
546 {
547 postSparkEvent(cap,tag,info1);
548 }
549 }
550
551 void traceSparkCounters_ (Capability *cap,
552 SparkCounters counters,
553 StgWord remaining)
554 {
555 #if defined(DEBUG)
556 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
557 /* we currently don't do debug tracing of spark stats but we must
558 test for TRACE_STDERR because of the !eventlog_enabled case. */
559 } else
560 #endif
561 {
562 postSparkCountersEvent(cap, counters, remaining);
563 }
564 }
565
566 void traceTaskCreate_ (Task *task,
567 Capability *cap)
568 {
569 #if defined(DEBUG)
570 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
571 /* We currently don't do debug tracing of tasks but we must
572 test for TRACE_STDERR because of the !eventlog_enabled case. */
573 } else
574 #endif
575 {
576 EventTaskId taskid = serialisableTaskId(task);
577 EventKernelThreadId tid = kernelThreadId();
578 postTaskCreateEvent(taskid, cap->no, tid);
579 }
580 }
581
582 void traceTaskMigrate_ (Task *task,
583 Capability *cap,
584 Capability *new_cap)
585 {
586 #if defined(DEBUG)
587 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
588 /* We currently don't do debug tracing of tasks but we must
589 test for TRACE_STDERR because of the !eventlog_enabled case. */
590 } else
591 #endif
592 {
593 EventTaskId taskid = serialisableTaskId(task);
594 postTaskMigrateEvent(taskid, cap->no, new_cap->no);
595 }
596 }
597
598 void traceTaskDelete_ (Task *task)
599 {
600 #if defined(DEBUG)
601 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
602 /* We currently don't do debug tracing of tasks but we must
603 test for TRACE_STDERR because of the !eventlog_enabled case. */
604 } else
605 #endif
606 {
607 EventTaskId taskid = serialisableTaskId(task);
608 postTaskDeleteEvent(taskid);
609 }
610 }
611
612 void traceHeapProfBegin(StgWord8 profile_id)
613 {
614 if (eventlog_enabled) {
615 postHeapProfBegin(profile_id);
616 }
617 }
618
619 void traceHeapProfSampleBegin(StgInt era)
620 {
621 if (eventlog_enabled) {
622 postHeapProfSampleBegin(era);
623 }
624 }
625
626 void traceHeapProfSampleString(StgWord8 profile_id,
627 const char *label, StgWord residency)
628 {
629 if (eventlog_enabled) {
630 postHeapProfSampleString(profile_id, label, residency);
631 }
632 }
633
634 #if defined(PROFILING)
635 void traceHeapProfCostCentre(StgWord32 ccID,
636 const char *label,
637 const char *module,
638 const char *srcloc,
639 StgBool is_caf)
640 {
641 if (eventlog_enabled) {
642 postHeapProfCostCentre(ccID, label, module, srcloc, is_caf);
643 }
644 }
645
646 void traceHeapProfSampleCostCentre(StgWord8 profile_id,
647 CostCentreStack *stack, StgWord residency)
648 {
649 if (eventlog_enabled) {
650 postHeapProfSampleCostCentre(profile_id, stack, residency);
651 }
652 }
653 #endif
654
#if defined(DEBUG)
/* Print a capability-prefixed formatted message to stderr, holding
 * trace_utx for the duration so the line is not interleaved.
 * Consumes 'ap' (caller owns va_start/va_end). */
static void vtraceCap_stderr(Capability *cap, char *msg, va_list ap)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    debugBelch("cap %d: ", cap->no);
    vdebugBelch(msg,ap);
    debugBelch("\n");

    RELEASE_LOCK(&trace_utx);
}

/* Variadic convenience wrapper around vtraceCap_stderr. */
static void traceCap_stderr(Capability *cap, char *msg, ...)
{
  va_list ap;
  va_start(ap,msg);
  vtraceCap_stderr(cap, msg, ap);
  va_end(ap);
}
#endif
676
/* Emit a formatted, capability-tagged trace message: to stderr in
 * DEBUG/TRACE_STDERR mode, otherwise to the eventlog. The va_list is
 * consumed exactly once on whichever path is taken. */
void traceCap_(Capability *cap, char *msg, ...)
{
    va_list ap;
    va_start(ap,msg);

#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        vtraceCap_stderr(cap, msg, ap);
    } else
#endif
    {
        postCapMsg(cap, msg, ap);
    }

    va_end(ap);
}
693
#if defined(DEBUG)
/* Print a formatted trace message (no capability prefix) to stderr,
 * holding trace_utx for the duration. Consumes 'ap'. */
static void vtrace_stderr(char *msg, va_list ap)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    vdebugBelch(msg,ap);
    debugBelch("\n");

    RELEASE_LOCK(&trace_utx);
}
#endif
706
/* Emit a formatted trace message not tied to a capability: to stderr in
 * DEBUG/TRACE_STDERR mode, otherwise to the eventlog. The va_list is
 * consumed exactly once on whichever path is taken. */
void trace_(char *msg, ...)
{
    va_list ap;
    va_start(ap,msg);

#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        vtrace_stderr(msg, ap);
    } else
#endif
    {
        postMsg(msg, ap);
    }

    va_end(ap);
}
723
/* Emit a user message (from Debug.Trace / traceEvent# via cmm).
 * The dtrace probe fires unconditionally, regardless of TRACE_user. */
void traceUserMsg(Capability *cap, char *msg)
{
    /* Note: normally we don't check the TRACE_* flags here as they're checked
       by the wrappers in Trace.h. But traceUserMsg is special since it has no
       wrapper (it's called from cmm code), so we check TRACE_user here
     */
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_user) {
        // Use "%s" as format string to ignore format specifiers in msg (#3874).
        traceCap_stderr(cap, "%s", msg);
    } else
#endif
    {
        if (eventlog_enabled && TRACE_user) {
            postUserEvent(cap, EVENT_USER_MSG, msg);
        }
    }
    dtraceUserMsg(cap->no, msg);
}
743
/* Emit a binary user message of 'size' bytes into the eventlog.
 * There is no stderr rendering for binary payloads. */
void traceUserBinaryMsg(Capability *cap, uint8_t *msg, size_t size)
{
    /* Note: normally we don't check the TRACE_* flags here as they're checked
       by the wrappers in Trace.h. But traceUserBinaryMsg is special since it
       has no wrapper (it's called from cmm code), so we check TRACE_user here
     */
    if (eventlog_enabled && TRACE_user) {
        postUserBinaryEvent(cap, EVENT_USER_BINARY_MSG, msg, size);
    }
}
754
/* Emit a user marker (from Debug.Trace.traceMarker via cmm).
 * The dtrace probe fires unconditionally, regardless of TRACE_user. */
void traceUserMarker(Capability *cap, char *markername)
{
    /* Note: traceUserMarker is special since it has no wrapper (it's called
       from cmm code), so we check eventlog_enabled and TRACE_user here.
     */
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_user) {
        traceCap_stderr(cap, "User marker: %s", markername);
    } else
#endif
    {
        if (eventlog_enabled && TRACE_user) {
            postUserEvent(cap, EVENT_USER_MARKER, markername);
        }
    }
    dtraceUserMarker(cap->no, markername);
}
772
773
/* Record a user-supplied label for a thread (GHC.Conc.labelThread):
 * printed on stderr in DEBUG/TRACE_STDERR mode, posted to the eventlog
 * otherwise. */
void traceThreadLabel_(Capability *cap,
                       StgTSO     *tso,
                       char       *label)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        ACQUIRE_LOCK(&trace_utx);
        tracePreface();
        debugBelch("cap %d: thread %" FMT_Word " has label %s\n",
                   cap->no, (W_)tso->id, label);
        RELEASE_LOCK(&trace_utx);
    } else
#endif
    {
        postThreadLabel(cap, tso->id, label);
    }
}
791
792 void traceThreadStatus_ (StgTSO *tso USED_IF_DEBUG)
793 {
794 #if defined(DEBUG)
795 if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
796 printThreadStatus(tso);
797 } else
798 #endif
799 {
800 /* nothing - no event for this one yet */
801 }
802 }
803
#if defined(DEBUG)
/* Begin a multi-part stderr trace line: prints the preface and the
 * formatted prefix, and ACQUIRES trace_utx. The lock is held until the
 * matching traceEnd() call, so the two must always be paired. */
void traceBegin (const char *str, ...)
{
    va_list ap;
    va_start(ap,str);

    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    vdebugBelch(str,ap);
    va_end(ap);
}

/* Finish a multi-part stderr trace line started by traceBegin():
 * terminates the line and RELEASES trace_utx. */
void traceEnd (void)
{
    debugBelch("\n");
    RELEASE_LOCK(&trace_utx);
}
#endif /* DEBUG */
823
824 #endif /* TRACING */
825
826 // If DTRACE is enabled, but neither DEBUG nor TRACING, we need a C land
827 // wrapper for the user-msg probe (as we can't expand that in PrimOps.cmm)
828 //
829 #if !defined(DEBUG) && !defined(TRACING) && defined(DTRACE)
830
/* C-land wrapper for the dtrace user-msg probe; needed because the
 * probe macro cannot be expanded directly in PrimOps.cmm. */
void dtraceUserMsgWrapper(Capability *cap, char *msg)
{
    dtraceUserMsg(cap->no, msg);
}
835
/* C-land wrapper for the dtrace user-marker probe (see wrapper above's
 * rationale: probes cannot be expanded in PrimOps.cmm). */
void dtraceUserMarkerWrapper(Capability *cap, char *msg)
{
    dtraceUserMarker(cap->no, msg);
}
840
841 #endif /* !defined(DEBUG) && !defined(TRACING) && defined(DTRACE) */