rts/Trace.c
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 2006-2009
 *
 * Debug and performance tracing
 *
 * ---------------------------------------------------------------------------*/

// external headers
#include "Rts.h"

// internal headers
#include "Trace.h"

#if defined(TRACING)

#include "GetTime.h"
#include "GetEnv.h"
#include "Stats.h"
#include "eventlog/EventLog.h"
#include "rts/EventLogWriter.h"
#include "Threads.h"
#include "Printer.h"
#include "RtsFlags.h"

#if defined(HAVE_UNISTD_H)
#include <unistd.h>
#endif

// events
int TRACE_sched;
int TRACE_gc;
int TRACE_spark_sampled;
int TRACE_spark_full;
int TRACE_user;
int TRACE_cap;

#if defined(THREADED_RTS)
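// Serialises the stderr tracing paths below so that multi-part debugBelch
// output from different OS threads does not interleave; the eventlog paths
// buffer events per capability and do not take this lock.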
static Mutex trace_utx;
#endif

static bool eventlog_enabled;

/* ---------------------------------------------------------------------------
   Starting up / shutting down the tracing facilities
   --------------------------------------------------------------------------- */

static const EventLogWriter *getEventLogWriter(void)
{
    return rtsConfig.eventlog_writer;
}
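
/* The writer comes from the RtsConfig supplied at startup.  Unless the
 * program installed its own EventLogWriter, this is normally the default
 * file-based writer that produces <program>.eventlog.  A custom writer is
 * installed roughly like this (a sketch with illustrative names only; the
 * exact struct fields and signatures are the ones declared in
 * rts/EventLogWriter.h and RtsAPI.h):
 *
 *     static void myInit(void) { ... }
 *     static bool myWrite(void *buf, size_t size) { ...; return true; }
 *     static void myFlush(void) { ... }
 *     static void myStop(void) { ... }
 *
 *     const EventLogWriter myWriter = {
 *         .initEventLogWriter = myInit,
 *         .writeEventLog      = myWrite,
 *         .flushEventLog      = myFlush,
 *         .stopEventLogWriter = myStop
 *     };
 *
 *     RtsConfig conf = defaultRtsConfig;
 *     conf.eventlog_writer = &myWriter;
 *     hs_init_ghc(&argc, &argv, conf);
 */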

void initTracing (void)
{
    const EventLogWriter *eventlog_writer = getEventLogWriter();

#if defined(THREADED_RTS)
    initMutex(&trace_utx);
#endif

    // -Ds turns on scheduler tracing too
    TRACE_sched =
        RtsFlags.TraceFlags.scheduler ||
        RtsFlags.DebugFlags.scheduler;

    // -Dg turns on gc tracing too
    TRACE_gc =
        RtsFlags.TraceFlags.gc ||
        RtsFlags.DebugFlags.gc ||
        RtsFlags.DebugFlags.scheduler;
    if (TRACE_gc && RtsFlags.GcFlags.giveStats == NO_GC_STATS) {
        RtsFlags.GcFlags.giveStats = COLLECT_GC_STATS;
    }

    TRACE_spark_sampled =
        RtsFlags.TraceFlags.sparks_sampled;

    // -Dr turns on full spark tracing
    TRACE_spark_full =
        RtsFlags.TraceFlags.sparks_full ||
        RtsFlags.DebugFlags.sparks;

    TRACE_user =
        RtsFlags.TraceFlags.user;

    // We trace cap events if we're tracing anything else
    TRACE_cap =
        TRACE_sched ||
        TRACE_gc ||
        TRACE_spark_sampled ||
        TRACE_spark_full ||
        TRACE_user;

    eventlog_enabled = RtsFlags.TraceFlags.tracing == TRACE_EVENTLOG &&
                       eventlog_writer != NULL;

    /* Note: we can have any of the TRACE_* flags turned on even when
       eventlog_enabled is off. In the DEBUG way we may be tracing to stderr.
     */

    if (eventlog_enabled) {
        initEventLogging(eventlog_writer);
    }
}
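
/* A rough sketch of where the fields read above come from (RtsFlags.c has
 * the authoritative parsing): +RTS -l selects TRACE_EVENTLOG as the tracing
 * target and its optional class letters populate the TraceFlags.scheduler /
 * gc / sparks_* / user fields, while the -D* flags of a DEBUG-way RTS set the
 * corresponding DebugFlags and are normally traced to stderr instead.
 */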

void endTracing (void)
{
    if (eventlog_enabled) {
        endEventLogging();
    }
}

void freeTracing (void)
{
    if (eventlog_enabled) {
        freeEventLogging();
    }
}

void resetTracing (void)
{
    const EventLogWriter *eventlog_writer;
    eventlog_writer = getEventLogWriter();

    if (eventlog_enabled) {
        abortEventLogging(); // abort eventlog inherited from parent
        if (eventlog_writer != NULL) {
            initEventLogging(eventlog_writer); // child starts its own eventlog
        }
    }
}
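
/* resetTracing is intended for the child side of forkProcess(): the eventlog
 * state inherited from the parent is abandoned and, if a writer is still
 * available, a fresh log is started for the child process.
 */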

void tracingAddCapapilities (uint32_t from, uint32_t to)
{
    if (eventlog_enabled) {
        moreCapEventBufs(from,to);
    }
}

/* ---------------------------------------------------------------------------
   Emitting trace messages/events
   --------------------------------------------------------------------------- */

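/* Almost every trace function below dispatches the same way: in a DEBUG
 * build with stderr tracing selected (TraceFlags.tracing == TRACE_STDERR)
 * it formats a human-readable line with debugBelch(), holding trace_utx so
 * output from different capabilities doesn't interleave; otherwise it hands
 * the event to the eventlog layer via the post*Event functions from
 * eventlog/EventLog.h.
 */
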
#if defined(DEBUG)
static void tracePreface (void)
{
#if defined(THREADED_RTS)
    debugBelch("%12lx: ", (unsigned long)osThreadId());
#endif
    if (RtsFlags.TraceFlags.timestamp) {
        debugBelch("%9" FMT_Word64 ": ", stat_getElapsedTime());
    }
}
#endif

#if defined(DEBUG)
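/* The indices mirror the status word of EVENT_STOP_THREAD: the low values
 * are the plain thread return codes (HeapOverflow .. ThreadFinished) and
 * THREAD_SUSPENDED_FOREIGN_CALL, while a blocked thread's why_blocked reason
 * is packed as "6 + reason" (see the info1 check in traceSchedEvent_stderr
 * below).
 */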
static char *thread_stop_reasons[] = {
    [HeapOverflow] = "heap overflow",
    [StackOverflow] = "stack overflow",
    [ThreadYielding] = "yielding",
    [ThreadBlocked] = "blocked",
    [ThreadFinished] = "finished",
    [THREAD_SUSPENDED_FOREIGN_CALL] = "suspended while making a foreign call",
    [6 + BlockedOnMVar] = "blocked on an MVar",
    [6 + BlockedOnMVarRead] = "blocked on an atomic MVar read",
    [6 + BlockedOnBlackHole] = "blocked on a black hole",
    [6 + BlockedOnRead] = "blocked on a read operation",
    [6 + BlockedOnWrite] = "blocked on a write operation",
    [6 + BlockedOnDelay] = "blocked on a delay operation",
    [6 + BlockedOnSTM] = "blocked on STM",
    [6 + BlockedOnDoProc] = "blocked on asyncDoProc",
    [6 + BlockedOnCCall] = "blocked on a foreign call",
    [6 + BlockedOnCCall_Interruptible] = "blocked on a foreign call (interruptible)",
    [6 + BlockedOnMsgThrowTo] = "blocked on throwTo",
    [6 + ThreadMigrating] = "migrating"
};
#endif

#if defined(DEBUG)
static void traceSchedEvent_stderr (Capability *cap, EventTypeNum tag,
                                    StgTSO *tso,
                                    StgWord info1 STG_UNUSED,
                                    StgWord info2 STG_UNUSED)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
    case EVENT_CREATE_THREAD:   // (cap, thread)
        debugBelch("cap %d: created thread %" FMT_Word "\n",
                   cap->no, (W_)tso->id);
        break;
    case EVENT_RUN_THREAD:      // (cap, thread)
        debugBelch("cap %d: running thread %" FMT_Word " (%s)\n",
                   cap->no, (W_)tso->id, what_next_strs[tso->what_next]);
        break;
    case EVENT_THREAD_RUNNABLE: // (cap, thread)
        debugBelch("cap %d: thread %" FMT_Word " appended to run queue\n",
                   cap->no, (W_)tso->id);
        break;
    case EVENT_MIGRATE_THREAD:  // (cap, thread, new_cap)
        debugBelch("cap %d: thread %" FMT_Word " migrating to cap %d\n",
                   cap->no, (W_)tso->id, (int)info1);
        break;
    case EVENT_THREAD_WAKEUP:   // (cap, thread, info1_cap)
        debugBelch("cap %d: waking up thread %" FMT_Word " on cap %d\n",
                   cap->no, (W_)tso->id, (int)info1);
        break;

    case EVENT_STOP_THREAD:     // (cap, thread, status)
        if (info1 == 6 + BlockedOnBlackHole) {
            debugBelch("cap %d: thread %" FMT_Word " stopped (blocked on black hole owned by thread %lu)\n",
                       cap->no, (W_)tso->id, (long)info2);
        } else {
            debugBelch("cap %d: thread %" FMT_Word " stopped (%s)\n",
                       cap->no, (W_)tso->id, thread_stop_reasons[info1]);
        }
        break;
    default:
        debugBelch("cap %d: thread %" FMT_Word ": event %d\n\n",
                   cap->no, (W_)tso->id, tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif

void traceSchedEvent_ (Capability *cap, EventTypeNum tag,
                       StgTSO *tso, StgWord info1, StgWord info2)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        traceSchedEvent_stderr(cap, tag, tso, info1, info2);
    } else
#endif
    {
        postSchedEvent(cap,tag,tso ? tso->id : 0, info1, info2);
    }
}

#if defined(DEBUG)
static void traceGcEvent_stderr (Capability *cap, EventTypeNum tag)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {
    case EVENT_REQUEST_SEQ_GC:  // (cap)
        debugBelch("cap %d: requesting sequential GC\n", cap->no);
        break;
    case EVENT_REQUEST_PAR_GC:  // (cap)
        debugBelch("cap %d: requesting parallel GC\n", cap->no);
        break;
    case EVENT_GC_START:        // (cap)
        debugBelch("cap %d: starting GC\n", cap->no);
        break;
    case EVENT_GC_END:          // (cap)
        debugBelch("cap %d: finished GC\n", cap->no);
        break;
    case EVENT_GC_IDLE:         // (cap)
        debugBelch("cap %d: GC idle\n", cap->no);
        break;
    case EVENT_GC_WORK:         // (cap)
        debugBelch("cap %d: GC working\n", cap->no);
        break;
    case EVENT_GC_DONE:         // (cap)
        debugBelch("cap %d: GC done\n", cap->no);
        break;
    case EVENT_GC_GLOBAL_SYNC:  // (cap)
        debugBelch("cap %d: all caps stopped for GC\n", cap->no);
        break;
    default:
        barf("traceGcEvent: unknown event tag %d", tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif

void traceGcEvent_ (Capability *cap, EventTypeNum tag)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        traceGcEvent_stderr(cap, tag);
    } else
#endif
    {
        /* currently all GC events are nullary events */
        postEvent(cap, tag);
    }
}

void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        traceGcEvent_stderr(cap, tag);
    } else
#endif
    {
        /* assuming nullary events and explicitly inserting a timestamp */
        postEventAtTimestamp(cap, ts, tag);
    }
}

void traceHeapEvent_ (Capability *cap,
                      EventTypeNum tag,
                      CapsetID heap_capset,
                      W_ info1)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* no stderr equivalent for these ones */
    } else
#endif
    {
        postHeapEvent(cap, tag, heap_capset, info1);
    }
}

void traceEventHeapInfo_ (CapsetID heap_capset,
                          uint32_t gens,
                          W_ maxHeapSize,
                          W_ allocAreaSize,
                          W_ mblockSize,
                          W_ blockSize)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* no stderr equivalent for these ones */
    } else
#endif
    {
        postEventHeapInfo(heap_capset, gens,
                          maxHeapSize, allocAreaSize,
                          mblockSize, blockSize);
    }
}

void traceEventGcStats_ (Capability *cap,
                         CapsetID heap_capset,
                         uint32_t gen,
                         W_ copied,
                         W_ slop,
                         W_ fragmentation,
                         uint32_t par_n_threads,
                         W_ par_max_copied,
                         W_ par_tot_copied,
                         W_ par_balanced_copied)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* no stderr equivalent for these ones */
    } else
#endif
    {
        postEventGcStats(cap, heap_capset, gen,
                         copied, slop, fragmentation,
                         par_n_threads, par_max_copied,
                         par_tot_copied, par_balanced_copied);
    }
}

void traceCapEvent_ (Capability *cap,
                     EventTypeNum tag)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        ACQUIRE_LOCK(&trace_utx);

        tracePreface();
        switch (tag) {
        case EVENT_CAP_CREATE:   // (cap)
            debugBelch("cap %d: initialised\n", cap->no);
            break;
        case EVENT_CAP_DELETE:   // (cap)
            debugBelch("cap %d: shutting down\n", cap->no);
            break;
        case EVENT_CAP_ENABLE:   // (cap)
            debugBelch("cap %d: enabling capability\n", cap->no);
            break;
        case EVENT_CAP_DISABLE:  // (cap)
            debugBelch("cap %d: disabling capability\n", cap->no);
            break;
        }
        RELEASE_LOCK(&trace_utx);
    } else
#endif
    {
        if (eventlog_enabled) {
            postCapEvent(tag, (EventCapNo)cap->no);
        }
    }
}

void traceCapsetEvent_ (EventTypeNum tag,
                        CapsetID capset,
                        StgWord info)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_sched)
        // When events go to stderr, it is annoying to see the capset
        // events every time, so we only emit them with -Ds.
    {
        ACQUIRE_LOCK(&trace_utx);

        tracePreface();
        switch (tag) {
        case EVENT_CAPSET_CREATE:      // (capset, capset_type)
            debugBelch("created capset %" FMT_Word32 " of type %d\n", capset,
                       (int)info);
            break;
        case EVENT_CAPSET_DELETE:      // (capset)
            debugBelch("deleted capset %" FMT_Word32 "\n", capset);
            break;
        case EVENT_CAPSET_ASSIGN_CAP:  // (capset, capno)
            debugBelch("assigned cap %" FMT_Word " to capset %" FMT_Word32 "\n",
                       info, capset);
            break;
        case EVENT_CAPSET_REMOVE_CAP:  // (capset, capno)
            debugBelch("removed cap %" FMT_Word " from capset %" FMT_Word32
                       "\n", info, capset);
            break;
        }
        RELEASE_LOCK(&trace_utx);
    } else
#endif
    {
        if (eventlog_enabled) {
            postCapsetEvent(tag, capset, info);
        }
    }
}

void traceWallClockTime_(void) {
    if (eventlog_enabled) {
        postWallClockTime(CAPSET_CLOCKDOMAIN_DEFAULT);
    }
}

void traceOSProcessInfo_(void) {
    if (eventlog_enabled) {
        postCapsetEvent(EVENT_OSPROCESS_PID,
                        CAPSET_OSPROCESS_DEFAULT,
                        getpid());

#if !defined (mingw32_HOST_OS)
        /* Windows has no strong concept of process hierarchy, so no getppid().
         * In any case, this trace event is mainly useful for tracing programs
         * that use 'forkProcess' which Windows doesn't support anyway.
         */
        postCapsetEvent(EVENT_OSPROCESS_PPID,
                        CAPSET_OSPROCESS_DEFAULT,
                        getppid());
#endif
        {
            char buf[256];
            snprintf(buf, sizeof(buf), "GHC-%s %s", ProjectVersion, RtsWay);
            postCapsetStrEvent(EVENT_RTS_IDENTIFIER,
                               CAPSET_OSPROCESS_DEFAULT,
                               buf);
        }
        {
            int argc = 0; char **argv;
            getFullProgArgv(&argc, &argv);
            if (argc != 0) {
                postCapsetVecEvent(EVENT_PROGRAM_ARGS,
                                   CAPSET_OSPROCESS_DEFAULT,
                                   argc, argv);
            }
        }
        {
            int envc = 0; char **envv;
            getProgEnvv(&envc, &envv);
            if (envc != 0) {
                postCapsetVecEvent(EVENT_PROGRAM_ENV,
                                   CAPSET_OSPROCESS_DEFAULT,
                                   envc, envv);
            }
            freeProgEnvv(envc, envv);
        }
    }
}

#if defined(DEBUG)
static void traceSparkEvent_stderr (Capability *cap, EventTypeNum tag,
                                    StgWord info1)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    switch (tag) {

    case EVENT_CREATE_SPARK_THREAD: // (cap, spark_thread)
        debugBelch("cap %d: creating spark thread %lu\n",
                   cap->no, (long)info1);
        break;
    case EVENT_SPARK_CREATE:        // (cap)
        debugBelch("cap %d: added spark to pool\n",
                   cap->no);
        break;
    case EVENT_SPARK_DUD:           // (cap)
        debugBelch("cap %d: discarded dud spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_OVERFLOW:      // (cap)
        debugBelch("cap %d: discarded overflowed spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_RUN:           // (cap)
        debugBelch("cap %d: running a spark\n",
                   cap->no);
        break;
    case EVENT_SPARK_STEAL:         // (cap, victim_cap)
        debugBelch("cap %d: stealing a spark from cap %d\n",
                   cap->no, (int)info1);
        break;
    case EVENT_SPARK_FIZZLE:        // (cap)
        debugBelch("cap %d: fizzled spark removed from pool\n",
                   cap->no);
        break;
    case EVENT_SPARK_GC:            // (cap)
        debugBelch("cap %d: GCd spark removed from pool\n",
                   cap->no);
        break;
    default:
        barf("traceSparkEvent: unknown event tag %d", tag);
        break;
    }

    RELEASE_LOCK(&trace_utx);
}
#endif

void traceSparkEvent_ (Capability *cap, EventTypeNum tag, StgWord info1)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        traceSparkEvent_stderr(cap, tag, info1);
    } else
#endif
    {
        postSparkEvent(cap,tag,info1);
    }
}

void traceSparkCounters_ (Capability *cap,
                          SparkCounters counters,
                          StgWord remaining)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* we currently don't do debug tracing of spark stats but we must
           test for TRACE_STDERR because of the !eventlog_enabled case. */
    } else
#endif
    {
        postSparkCountersEvent(cap, counters, remaining);
    }
}

void traceTaskCreate_ (Task       *task,
                       Capability *cap)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* We currently don't do debug tracing of tasks but we must
           test for TRACE_STDERR because of the !eventlog_enabled case. */
    } else
#endif
    {
        EventTaskId         taskid = serialisableTaskId(task);
        EventKernelThreadId tid    = kernelThreadId();
        postTaskCreateEvent(taskid, cap->no, tid);
    }
}

void traceTaskMigrate_ (Task       *task,
                        Capability *cap,
                        Capability *new_cap)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* We currently don't do debug tracing of tasks but we must
           test for TRACE_STDERR because of the !eventlog_enabled case. */
    } else
#endif
    {
        EventTaskId taskid = serialisableTaskId(task);
        postTaskMigrateEvent(taskid, cap->no, new_cap->no);
    }
}

void traceTaskDelete_ (Task *task)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        /* We currently don't do debug tracing of tasks but we must
           test for TRACE_STDERR because of the !eventlog_enabled case. */
    } else
#endif
    {
        EventTaskId taskid = serialisableTaskId(task);
        postTaskDeleteEvent(taskid);
    }
}

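/* The heap-profiling events below have no stderr form: they are only ever
 * sent to the eventlog, so each function checks eventlog_enabled directly
 * instead of going through the stderr/eventlog dispatch used above.
 */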
void traceHeapProfBegin(StgWord8 profile_id)
{
    if (eventlog_enabled) {
        postHeapProfBegin(profile_id);
    }
}

void traceHeapProfSampleBegin(StgInt era)
{
    if (eventlog_enabled) {
        postHeapProfSampleBegin(era);
    }
}

void traceHeapProfSampleString(StgWord8 profile_id,
                               const char *label, StgWord residency)
{
    if (eventlog_enabled) {
        postHeapProfSampleString(profile_id, label, residency);
    }
}

#if defined(PROFILING)
void traceHeapProfCostCentre(StgWord32 ccID,
                             const char *label,
                             const char *module,
                             const char *srcloc,
                             StgBool is_caf)
{
    if (eventlog_enabled) {
        postHeapProfCostCentre(ccID, label, module, srcloc, is_caf);
    }
}

void traceHeapProfSampleCostCentre(StgWord8 profile_id,
                                   CostCentreStack *stack, StgWord residency)
{
    if (eventlog_enabled) {
        postHeapProfSampleCostCentre(profile_id, stack, residency);
    }
}
#endif

#if defined(DEBUG)
static void vtraceCap_stderr(Capability *cap, char *msg, va_list ap)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    debugBelch("cap %d: ", cap->no);
    vdebugBelch(msg,ap);
    debugBelch("\n");

    RELEASE_LOCK(&trace_utx);
}

static void traceCap_stderr(Capability *cap, char *msg, ...)
{
    va_list ap;
    va_start(ap,msg);
    vtraceCap_stderr(cap, msg, ap);
    va_end(ap);
}
#endif

void traceCap_(Capability *cap, char *msg, ...)
{
    va_list ap;
    va_start(ap,msg);

#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        vtraceCap_stderr(cap, msg, ap);
    } else
#endif
    {
        postCapMsg(cap, msg, ap);
    }

    va_end(ap);
}

#if defined(DEBUG)
static void vtrace_stderr(char *msg, va_list ap)
{
    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    vdebugBelch(msg,ap);
    debugBelch("\n");

    RELEASE_LOCK(&trace_utx);
}
#endif

void trace_(char *msg, ...)
{
    va_list ap;
    va_start(ap,msg);

#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        vtrace_stderr(msg, ap);
    } else
#endif
    {
        postMsg(msg, ap);
    }

    va_end(ap);
}

void traceUserMsg(Capability *cap, char *msg)
{
    /* Note: normally we don't check the TRACE_* flags here as they're checked
       by the wrappers in Trace.h. But traceUserMsg is special since it has no
       wrapper (it's called from cmm code), so we check TRACE_user here.
     */
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_user) {
        // Use "%s" as format string to ignore format specifiers in msg (#3874).
        traceCap_stderr(cap, "%s", msg);
    } else
#endif
    {
        if (eventlog_enabled && TRACE_user) {
            postUserEvent(cap, EVENT_USER_MSG, msg);
        }
    }
    dtraceUserMsg(cap->no, msg);
}

void traceUserMarker(Capability *cap, char *markername)
{
    /* Note: traceUserMarker is special since it has no wrapper (it's called
       from cmm code), so we check eventlog_enabled and TRACE_user here.
     */
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR && TRACE_user) {
        traceCap_stderr(cap, "User marker: %s", markername);
    } else
#endif
    {
        if (eventlog_enabled && TRACE_user) {
            postUserEvent(cap, EVENT_USER_MARKER, markername);
        }
    }
    dtraceUserMarker(cap->no, markername);
}
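
/* Both traceUserMsg and traceUserMarker are reached from Haskell code: the
 * traceEvent# and traceMarker# primops (wrapped by Debug.Trace.traceEventIO
 * and Debug.Trace.traceMarkerIO) call into them via PrimOps.cmm, which is
 * why they carry their own flag checks rather than relying on the Trace.h
 * wrappers.
 */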

void traceThreadLabel_(Capability *cap,
                       StgTSO     *tso,
                       char       *label)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        ACQUIRE_LOCK(&trace_utx);
        tracePreface();
        debugBelch("cap %d: thread %" FMT_Word " has label %s\n",
                   cap->no, (W_)tso->id, label);
        RELEASE_LOCK(&trace_utx);
    } else
#endif
    {
        postThreadLabel(cap, tso->id, label);
    }
}

void traceThreadStatus_ (StgTSO *tso USED_IF_DEBUG)
{
#if defined(DEBUG)
    if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
        printThreadStatus(tso);
    } else
#endif
    {
        /* nothing - no event for this one yet */
    }
}

#if defined(DEBUG)
void traceBegin (const char *str, ...)
{
    va_list ap;
    va_start(ap,str);

    ACQUIRE_LOCK(&trace_utx);

    tracePreface();
    vdebugBelch(str,ap);
    va_end(ap);
}

void traceEnd (void)
{
    debugBelch("\n");
    RELEASE_LOCK(&trace_utx);
}
#endif /* DEBUG */

#endif /* TRACING */

// If DTRACE is enabled, but neither DEBUG nor TRACING, we need a C land
// wrapper for the user-msg probe (as we can't expand that in PrimOps.cmm)
//
#if !defined(DEBUG) && !defined(TRACING) && defined(DTRACE)

void dtraceUserMsgWrapper(Capability *cap, char *msg)
{
    dtraceUserMsg(cap->no, msg);
}

void dtraceUserMarkerWrapper(Capability *cap, char *msg)
{
    dtraceUserMarker(cap->no, msg);
}

#endif /* !defined(DEBUG) && !defined(TRACING) && defined(DTRACE) */