/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2008-2009
 *
 * Support for fast binary event logging and user-space dtrace probes.
 *
 * ---------------------------------------------------------------------------*/

#pragma once

#include "rts/EventLogFormat.h"
#include "sm/NonMovingCensus.h"
#include "Capability.h"

#if defined(DTRACE)
#include "RtsProbes.h"
#endif /* defined(DTRACE) */

#include "BeginPrivate.h"

// -----------------------------------------------------------------------------
// EventLog API
// -----------------------------------------------------------------------------

#if defined(TRACING)

void initTracing (void);
void endTracing (void);
void freeTracing (void);
void resetTracing (void);
void tracingAddCapapilities (uint32_t from, uint32_t to);

#endif /* TRACING */

typedef StgWord32 CapsetID;
typedef StgWord16 CapsetType;
enum CapsetType { CapsetTypeCustom      = CAPSET_TYPE_CUSTOM,
                  CapsetTypeOsProcess   = CAPSET_TYPE_OSPROCESS,
                  CapsetTypeClockdomain = CAPSET_TYPE_CLOCKDOMAIN };
#define CAPSET_OSPROCESS_DEFAULT   ((CapsetID)0)
#define CAPSET_HEAP_DEFAULT        ((CapsetID)0) /* reusing the same capset */
#define CAPSET_CLOCKDOMAIN_DEFAULT ((CapsetID)1)

// -----------------------------------------------------------------------------
// Message classes
// -----------------------------------------------------------------------------

// shorthand for RtsFlags.DebugFlags.<blah>, useful with debugTrace()
#define DEBUG_sched        RtsFlags.DebugFlags.scheduler
#define DEBUG_interp       RtsFlags.DebugFlags.interp
#define DEBUG_weak         RtsFlags.DebugFlags.weak
#define DEBUG_gccafs       RtsFlags.DebugFlags.gccafs
#define DEBUG_gc           RtsFlags.DebugFlags.gc
#define DEBUG_nonmoving_gc RtsFlags.DebugFlags.nonmoving_gc
#define DEBUG_block_alloc  RtsFlags.DebugFlags.alloc
#define DEBUG_sanity       RtsFlags.DebugFlags.sanity
#define DEBUG_zero_on_gc   RtsFlags.DebugFlags.zero_on_gc
#define DEBUG_stable       RtsFlags.DebugFlags.stable
#define DEBUG_stm          RtsFlags.DebugFlags.stm
#define DEBUG_prof         RtsFlags.DebugFlags.prof
#define DEBUG_gran         RtsFlags.DebugFlags.gran
#define DEBUG_par          RtsFlags.DebugFlags.par
#define DEBUG_linker       RtsFlags.DebugFlags.linker
#define DEBUG_squeeze      RtsFlags.DebugFlags.squeeze
#define DEBUG_hpc          RtsFlags.DebugFlags.hpc
#define DEBUG_sparks       RtsFlags.DebugFlags.sparks
#define DEBUG_compact      RtsFlags.DebugFlags.compact
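
// Illustrative usage (not code from this header): since these are just
// shorthands for RtsFlags fields, they can be passed directly as the "class"
// argument of debugTrace()/debugTraceCap() defined further below, e.g.
//
//     debugTrace(DEBUG_nonmoving_gc, "flushing update remembered set");
//
// Each class is switched on at run time by the corresponding -D<flag> RTS
// option (and requires a DEBUG build of the RTS).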

// events
extern int TRACE_sched;
extern int TRACE_gc;
extern int TRACE_spark_sampled;
extern int TRACE_spark_full;
/* extern int TRACE_user; */ // only used in Trace.c
extern int TRACE_cap;
extern int TRACE_nonmoving_gc;

// -----------------------------------------------------------------------------
// Posting events
//
// We deliberately use macros rather than inline functions here: we want
// the not-taken case to be as cheap as possible (a single test-and-jump),
// and with inline functions gcc tended to hoist some of the instructions
// from the branch above the test.
//
// -----------------------------------------------------------------------------
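
// As an illustration of the pattern used below, traceSchedEvent(cap, tag,
// tso, other) expands to (roughly):
//
//     if (RTS_UNLIKELY(TRACE_sched)) {
//         traceSchedEvent_(cap, tag, tso, other, 0);
//     }
//
// so when the event class is disabled, the cost at the call site is a single
// flag test and a predicted-not-taken branch.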

#if defined(DEBUG)
void traceBegin (const char *str, ...);
void traceEnd (void);
#endif

#if defined(TRACING)

/*
 * Record a scheduler event
 */
#define traceSchedEvent(cap, tag, tso, other) \
    if (RTS_UNLIKELY(TRACE_sched)) { \
        traceSchedEvent_(cap, tag, tso, other, 0); \
    }

#define traceSchedEvent2(cap, tag, tso, info1, info2) \
    if (RTS_UNLIKELY(TRACE_sched)) { \
        traceSchedEvent_(cap, tag, tso, info1, info2); \
    }

void traceSchedEvent_ (Capability *cap, EventTypeNum tag,
                       StgTSO *tso, StgWord info1, StgWord info2);

/*
 * Record a GC event
 */
#define traceGcEvent(cap, tag) \
    if (RTS_UNLIKELY(TRACE_gc)) { \
        traceGcEvent_(cap, tag); \
    }

void traceGcEvent_ (Capability *cap, EventTypeNum tag);

/*
 * Record a GC event at the explicitly given timestamp
 */
#define traceGcEventAtT(cap, ts, tag) \
    if (RTS_UNLIKELY(TRACE_gc)) { \
        traceGcEventAtT_(cap, ts, tag); \
    }

void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag);

/*
 * Record a heap event
 */
#define traceHeapEvent(cap, tag, heap_capset, info1) \
    if (RTS_UNLIKELY(TRACE_gc)) { \
        traceHeapEvent_(cap, tag, heap_capset, info1); \
    }
void traceHeapEvent_ (Capability *cap,
                      EventTypeNum tag,
                      CapsetID heap_capset,
                      W_ info1);

void traceEventHeapInfo_ (CapsetID heap_capset,
                          uint32_t gens,
                          W_ maxHeapSize,
                          W_ allocAreaSize,
                          W_ mblockSize,
                          W_ blockSize);

void traceEventGcStats_ (Capability *cap,
                         CapsetID heap_capset,
                         uint32_t gen,
                         W_ copied,
                         W_ slop,
                         W_ fragmentation,
                         uint32_t par_n_threads,
                         W_ par_max_copied,
                         W_ par_tot_copied,
                         W_ par_balanced_copied);

/*
 * Record a spark event
 */
#define traceSparkEvent(cap, tag) \
    if (RTS_UNLIKELY(TRACE_spark_full)) { \
        traceSparkEvent_(cap, tag, 0); \
    }

#define traceSparkEvent2(cap, tag, other) \
    if (RTS_UNLIKELY(TRACE_spark_full)) { \
        traceSparkEvent_(cap, tag, other); \
    }

void traceSparkEvent_ (Capability *cap, EventTypeNum tag, StgWord info1);

// Variadic macros are C99 and supported by gcc. However, the
// ##__VA_ARGS__ syntax is a gcc extension which allows the variable
// argument list to be empty (see the gcc docs for details).
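
// As an illustration (not code from this header), the ## extension is what
// lets both of the following uses of trace() preprocess correctly:
//
//     trace(TRACE_sched, "GC requested");                  // no extra args
//     trace(TRACE_sched, "thread %d blocked", (int)tid);   // one extra arg
//
// Without ##, the first call would expand to trace_(msg, ) with a stray
// trailing comma.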

/*
 * Emit a trace message on a particular Capability
 */
#define traceCap(class, cap, msg, ...) \
    if (RTS_UNLIKELY(class)) { \
        traceCap_(cap, msg, ##__VA_ARGS__); \
    }

void traceCap_(Capability *cap, char *msg, ...);

/*
 * Emit a trace message
 */
#define trace(class, msg, ...) \
    if (RTS_UNLIKELY(class)) { \
        trace_(msg, ##__VA_ARGS__); \
    }

void trace_(char *msg, ...);

/*
 * A message or event emitted by the program
 * Used by Debug.Trace.{traceEvent, traceEventIO}
 */
void traceUserMsg(Capability *cap, char *msg);

/*
 * A marker event emitted by the program
 * Used by Debug.Trace.{traceMarker, traceMarkerIO}
 */
void traceUserMarker(Capability *cap, char *msg);

/*
 * A binary message or event emitted by the program
 */
void traceUserBinaryMsg(Capability *cap, uint8_t *msg, size_t size);

/*
 * An event to record a Haskell thread's label/name
 * Used by GHC.Conc.labelThread
 */
void traceThreadLabel_(Capability *cap,
                       StgTSO *tso,
                       char *label);

/*
 * Emit a debug message (only when DEBUG is defined)
 */
#if defined(DEBUG)
#define debugTrace(class, msg, ...) \
    if (RTS_UNLIKELY(class)) { \
        trace_(msg, ##__VA_ARGS__); \
    }
#else
#define debugTrace(class, str, ...) /* nothing */
#endif

#if defined(DEBUG)
#define debugTraceCap(class, cap, msg, ...) \
    if (RTS_UNLIKELY(class)) { \
        traceCap_(cap, msg, ##__VA_ARGS__); \
    }
#else
#define debugTraceCap(class, cap, str, ...) /* nothing */
#endif
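
// Illustrative usage (a sketch, not part of this header): in a DEBUG build,
// with the relevant class enabled,
//
//     debugTrace(DEBUG_sched, "waking up thread %ld", (long)tso->id);
//     debugTraceCap(DEBUG_gc, cap, "starting GC");
//
// would emit debug messages; in a non-DEBUG build both calls compile away
// to nothing, as the definitions above show.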

/*
 * Emit a message/event describing the state of a thread
 */
#define traceThreadStatus(class, tso) \
    if (RTS_UNLIKELY(class)) { \
        traceThreadStatus_(tso); \
    }

void traceThreadStatus_ (StgTSO *tso);

/*
 * Events for describing capabilities and capability sets in the eventlog
 */
#define traceCapEvent(cap, tag) \
    if (RTS_UNLIKELY(TRACE_cap)) { \
        traceCapEvent_(cap, tag); \
    }

void traceCapEvent_ (Capability *cap,
                     EventTypeNum tag);

#define traceCapsetEvent(cap, capset, info) \
    if (RTS_UNLIKELY(TRACE_cap)) { \
        traceCapsetEvent_(cap, capset, info); \
    }

void traceCapsetEvent_ (EventTypeNum tag,
                        CapsetID capset,
                        StgWord info);

void traceWallClockTime_(void);

void traceOSProcessInfo_ (void);

void traceSparkCounters_ (Capability *cap,
                          SparkCounters counters,
                          StgWord remaining);

void traceTaskCreate_ (Task *task,
                       Capability *cap);

void traceTaskMigrate_ (Task *task,
                        Capability *cap,
                        Capability *new_cap);

void traceTaskDelete_ (Task *task);

void traceHeapProfBegin(StgWord8 profile_id);
void traceHeapProfSampleBegin(StgInt era);
void traceHeapBioProfSampleBegin(StgInt era, StgWord64 time);
void traceHeapProfSampleEnd(StgInt era);
void traceHeapProfSampleString(StgWord8 profile_id,
                               const char *label, StgWord residency);
#if defined(PROFILING)
void traceHeapProfCostCentre(StgWord32 ccID,
                             const char *label,
                             const char *module,
                             const char *srcloc,
                             StgBool is_caf);
void traceHeapProfSampleCostCentre(StgWord8 profile_id,
                                   CostCentreStack *stack, StgWord residency);
#endif /* PROFILING */

void traceConcMarkBegin(void);
void traceConcMarkEnd(StgWord32 marked_obj_count);
void traceConcSyncBegin(void);
void traceConcSyncEnd(void);
void traceConcSweepBegin(void);
void traceConcSweepEnd(void);
void traceConcUpdRemSetFlush(Capability *cap);
void traceNonmovingHeapCensus(uint32_t log_blk_size,
                              const struct NonmovingAllocCensus *census);

void flushTrace(void);

#else /* !TRACING */

#define traceSchedEvent(cap, tag, tso, other) /* nothing */
#define traceSchedEvent2(cap, tag, tso, other, info) /* nothing */
#define traceGcEvent(cap, tag) /* nothing */
#define traceGcEventAtT(cap, ts, tag) /* nothing */
#define traceEventGcStats_(cap, heap_capset, gen, \
                           copied, slop, fragmentation, \
                           par_n_threads, par_max_copied, \
                           par_tot_copied, par_balanced_copied) /* nothing */
#define traceHeapEvent(cap, tag, heap_capset, info1) /* nothing */
#define traceEventHeapInfo_(heap_capset, gens, \
                            maxHeapSize, allocAreaSize, \
                            mblockSize, blockSize) /* nothing */
#define traceSparkEvent(cap, tag) /* nothing */
#define traceSparkEvent2(cap, tag, other) /* nothing */
#define traceCap(class, cap, msg, ...) /* nothing */
#define trace(class, msg, ...) /* nothing */
#define debugTrace(class, str, ...) /* nothing */
#define debugTraceCap(class, cap, str, ...) /* nothing */
#define traceThreadStatus(class, tso) /* nothing */
#define traceThreadLabel_(cap, tso, label) /* nothing */
#define traceCapEvent(cap, tag) /* nothing */
#define traceCapsetEvent(tag, capset, info) /* nothing */
#define traceWallClockTime_() /* nothing */
#define traceOSProcessInfo_() /* nothing */
#define traceSparkCounters_(cap, counters, remaining) /* nothing */
#define traceTaskCreate_(taskID, cap) /* nothing */
#define traceTaskMigrate_(taskID, cap, new_cap) /* nothing */
#define traceTaskDelete_(taskID) /* nothing */
#define traceHeapProfBegin(profile_id) /* nothing */
#define traceHeapProfCostCentre(ccID, label, module, srcloc, is_caf) /* nothing */
#define traceHeapProfSampleBegin(era) /* nothing */
#define traceHeapBioProfSampleBegin(era, time) /* nothing */
#define traceHeapProfSampleEnd(era) /* nothing */
#define traceHeapProfSampleCostCentre(profile_id, stack, residency) /* nothing */
#define traceHeapProfSampleString(profile_id, label, residency) /* nothing */

#define traceConcMarkBegin() /* nothing */
#define traceConcMarkEnd(marked_obj_count) /* nothing */
#define traceConcSyncBegin() /* nothing */
#define traceConcSyncEnd() /* nothing */
#define traceConcSweepBegin() /* nothing */
#define traceConcSweepEnd() /* nothing */
#define traceConcUpdRemSetFlush(cap) /* nothing */
#define traceNonmovingHeapCensus(blk_size, census) /* nothing */

#define flushTrace() /* nothing */

#endif /* TRACING */

// If DTRACE is enabled, but neither DEBUG nor TRACING, we need C-land
// wrappers for the user-msg and user-marker probes (as we cannot expand
// them in PrimOps.cmm).
//
#if !defined(DEBUG) && !defined(TRACING) && defined(DTRACE)

void dtraceUserMsgWrapper(Capability *cap, char *msg);
void dtraceUserMarkerWrapper(Capability *cap, char *msg);

#endif /* !defined(DEBUG) && !defined(TRACING) && defined(DTRACE) */
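
// A plausible shape for such a wrapper (a sketch only; the real definitions
// live in Trace.c) is a plain C function that forwards to the probe alias
// defined below, e.g.
//
//     void dtraceUserMsgWrapper(Capability *cap, char *msg)
//     {
//         dtraceUserMsg(cap->no, msg);
//     }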

// -----------------------------------------------------------------------------
// Aliases for static dtrace probes if dtrace is available
// -----------------------------------------------------------------------------

#if defined(DTRACE)

#define dtraceCreateThread(cap, tid) \
    HASKELLEVENT_CREATE_THREAD(cap, tid)
#define dtraceRunThread(cap, tid) \
    HASKELLEVENT_RUN_THREAD(cap, tid)
#define dtraceStopThread(cap, tid, status, info) \
    HASKELLEVENT_STOP_THREAD(cap, tid, status, info)
#define dtraceThreadRunnable(cap, tid) \
    HASKELLEVENT_THREAD_RUNNABLE(cap, tid)
#define dtraceMigrateThread(cap, tid, new_cap) \
    HASKELLEVENT_MIGRATE_THREAD(cap, tid, new_cap)
#define dtraceThreadWakeup(cap, tid, other_cap) \
    HASKELLEVENT_THREAD_WAKEUP(cap, tid, other_cap)
#define dtraceGcStart(cap) \
    HASKELLEVENT_GC_START(cap)
#define dtraceGcEnd(cap) \
    HASKELLEVENT_GC_END(cap)
#define dtraceRequestSeqGc(cap) \
    HASKELLEVENT_REQUEST_SEQ_GC(cap)
#define dtraceRequestParGc(cap) \
    HASKELLEVENT_REQUEST_PAR_GC(cap)
#define dtraceCreateSparkThread(cap, spark_tid) \
    HASKELLEVENT_CREATE_SPARK_THREAD(cap, spark_tid)
#define dtraceThreadLabel(cap, tso, label) \
    HASKELLEVENT_THREAD_LABEL(cap, tso, label)
#define dtraceCapCreate(cap) \
    HASKELLEVENT_CAP_CREATE(cap)
#define dtraceCapDelete(cap) \
    HASKELLEVENT_CAP_DELETE(cap)
#define dtraceCapEnable(cap) \
    HASKELLEVENT_CAP_ENABLE(cap)
#define dtraceCapDisable(cap) \
    HASKELLEVENT_CAP_DISABLE(cap)
#define dtraceUserMsg(cap, msg) \
    HASKELLEVENT_USER_MSG(cap, msg)
#define dtraceUserMarker(cap, msg) \
    HASKELLEVENT_USER_MARKER(cap, msg)
#define dtraceGcIdle(cap) \
    HASKELLEVENT_GC_IDLE(cap)
#define dtraceGcWork(cap) \
    HASKELLEVENT_GC_WORK(cap)
#define dtraceGcDone(cap) \
    HASKELLEVENT_GC_DONE(cap)
#define dtraceGcGlobalSync(cap) \
    HASKELLEVENT_GC_GLOBAL_SYNC(cap)
#define dtraceEventGcStats(heap_capset, gens, \
                           copies, slop, fragmentation, \
                           par_n_threads, \
                           par_max_copied, \
                           par_tot_copied, \
                           par_balanced_copied) \
    HASKELLEVENT_GC_STATS(heap_capset, gens, \
                          copies, slop, fragmentation, \
                          par_n_threads, \
                          par_max_copied, \
                          par_balanced_copied, \
                          par_tot_copied)
#define dtraceHeapInfo(heap_capset, gens, \
                       maxHeapSize, allocAreaSize, \
                       mblockSize, blockSize) \
    HASKELLEVENT_HEAP_INFO(heap_capset, gens, \
                           maxHeapSize, allocAreaSize, \
                           mblockSize, blockSize)
#define dtraceEventHeapAllocated(cap, heap_capset, \
                                 allocated) \
    HASKELLEVENT_HEAP_ALLOCATED(cap, heap_capset, \
                                allocated)
#define dtraceEventHeapSize(heap_capset, size) \
    HASKELLEVENT_HEAP_SIZE(heap_capset, size)
#define dtraceEventHeapLive(heap_capset, live) \
    HASKELLEVENT_HEAP_LIVE(heap_capset, live)
#define dtraceCapsetCreate(capset, capset_type) \
    HASKELLEVENT_CAPSET_CREATE(capset, capset_type)
#define dtraceCapsetDelete(capset) \
    HASKELLEVENT_CAPSET_DELETE(capset)
#define dtraceCapsetAssignCap(capset, capno) \
    HASKELLEVENT_CAPSET_ASSIGN_CAP(capset, capno)
#define dtraceCapsetRemoveCap(capset, capno) \
    HASKELLEVENT_CAPSET_REMOVE_CAP(capset, capno)
#define dtraceSparkCounters(cap, a, b, c, d, e, f, g) \
    HASKELLEVENT_SPARK_COUNTERS(cap, a, b, c, d, e, f, g)
#define dtraceSparkCreate(cap) \
    HASKELLEVENT_SPARK_CREATE(cap)
#define dtraceSparkDud(cap) \
    HASKELLEVENT_SPARK_DUD(cap)
#define dtraceSparkOverflow(cap) \
    HASKELLEVENT_SPARK_OVERFLOW(cap)
#define dtraceSparkRun(cap) \
    HASKELLEVENT_SPARK_RUN(cap)
#define dtraceSparkSteal(cap, victim_cap) \
    HASKELLEVENT_SPARK_STEAL(cap, victim_cap)
#define dtraceSparkFizzle(cap) \
    HASKELLEVENT_SPARK_FIZZLE(cap)
#define dtraceSparkGc(cap) \
    HASKELLEVENT_SPARK_GC(cap)
#define dtraceTaskCreate(taskID, cap, tid) \
    HASKELLEVENT_TASK_CREATE(taskID, cap, tid)
#define dtraceTaskMigrate(taskID, cap, new_cap) \
    HASKELLEVENT_TASK_MIGRATE(taskID, cap, new_cap)
#define dtraceTaskDelete(taskID) \
    HASKELLEVENT_TASK_DELETE(taskID)

#else /* !defined(DTRACE) */

#define dtraceCreateThread(cap, tid) /* nothing */
#define dtraceRunThread(cap, tid) /* nothing */
#define dtraceStopThread(cap, tid, status, info) /* nothing */
#define dtraceThreadRunnable(cap, tid) /* nothing */
#define dtraceMigrateThread(cap, tid, new_cap) /* nothing */
#define dtraceThreadWakeup(cap, tid, other_cap) /* nothing */
#define dtraceGcStart(cap) /* nothing */
#define dtraceGcEnd(cap) /* nothing */
#define dtraceRequestSeqGc(cap) /* nothing */
#define dtraceRequestParGc(cap) /* nothing */
#define dtraceCreateSparkThread(cap, spark_tid) /* nothing */
#define dtraceThreadLabel(cap, tso, label) /* nothing */
#define dtraceUserMsg(cap, msg) /* nothing */
#define dtraceUserMarker(cap, msg) /* nothing */
#define dtraceGcIdle(cap) /* nothing */
#define dtraceGcWork(cap) /* nothing */
#define dtraceGcDone(cap) /* nothing */
#define dtraceGcGlobalSync(cap) /* nothing */
#define dtraceEventGcStats(heap_capset, gens, \
                           copies, slop, fragmentation, \
                           par_n_threads, \
                           par_max_copied, \
                           par_tot_copied, \
                           par_balanced_copied) /* nothing */
#define dtraceHeapInfo(heap_capset, gens, \
                       maxHeapSize, allocAreaSize, \
                       mblockSize, blockSize) /* nothing */
#define dtraceEventHeapAllocated(cap, heap_capset, \
                                 allocated) /* nothing */
#define dtraceEventHeapSize(heap_capset, size) /* nothing */
#define dtraceEventHeapLive(heap_capset, live) /* nothing */
#define dtraceCapCreate(cap) /* nothing */
#define dtraceCapDelete(cap) /* nothing */
#define dtraceCapEnable(cap) /* nothing */
#define dtraceCapDisable(cap) /* nothing */
#define dtraceCapsetCreate(capset, capset_type) /* nothing */
#define dtraceCapsetDelete(capset) /* nothing */
#define dtraceCapsetAssignCap(capset, capno) /* nothing */
#define dtraceCapsetRemoveCap(capset, capno) /* nothing */
#define dtraceSparkCounters(cap, a, b, c, d, e, f, g) /* nothing */
#define dtraceSparkCreate(cap) /* nothing */
#define dtraceSparkDud(cap) /* nothing */
#define dtraceSparkOverflow(cap) /* nothing */
#define dtraceSparkRun(cap) /* nothing */
#define dtraceSparkSteal(cap, victim_cap) /* nothing */
#define dtraceSparkFizzle(cap) /* nothing */
#define dtraceSparkGc(cap) /* nothing */
#define dtraceTaskCreate(taskID, cap, tid) /* nothing */
#define dtraceTaskMigrate(taskID, cap, new_cap) /* nothing */
#define dtraceTaskDelete(taskID) /* nothing */

#endif

// -----------------------------------------------------------------------------
// Trace probes dispatching to various tracing frameworks
//
// To avoid accumulating multiple tracing calls at each trace point, we
// define inline probe functions that bundle the various invocations.
//
// Dtrace: dtrace probes are added unconditionally, since probe activation
// is handled by the dtrace component of the kernel and inactive probes are
// very cheap - usually a single no-op. Consequently, dtrace can be used
// with all flavours of the RTS. In addition, we still support logging
// events to a file even in the presence of dtrace. This is useful, e.g.,
// when tracing on a server but browsing the trace with ThreadScope on a
// local client.
//
// -----------------------------------------------------------------------------

INLINE_HEADER void traceEventCreateThread(Capability *cap STG_UNUSED,
                                          StgTSO *tso STG_UNUSED)
{
    traceSchedEvent(cap, EVENT_CREATE_THREAD, tso, tso->stackobj->stack_size);
    dtraceCreateThread((EventCapNo)cap->no, (EventThreadID)tso->id);
}

INLINE_HEADER void traceEventRunThread(Capability *cap STG_UNUSED,
                                       StgTSO *tso STG_UNUSED)
{
    traceSchedEvent(cap, EVENT_RUN_THREAD, tso, tso->what_next);
    dtraceRunThread((EventCapNo)cap->no, (EventThreadID)tso->id);
}

INLINE_HEADER void traceEventStopThread(Capability *cap STG_UNUSED,
                                        StgTSO *tso STG_UNUSED,
                                        StgThreadReturnCode status STG_UNUSED,
                                        StgWord32 info STG_UNUSED)
{
    traceSchedEvent2(cap, EVENT_STOP_THREAD, tso, status, info);
    dtraceStopThread((EventCapNo)cap->no, (EventThreadID)tso->id,
                     (EventThreadStatus)status, (EventThreadID)info);
}

INLINE_HEADER void traceEventMigrateThread(Capability *cap STG_UNUSED,
                                           StgTSO *tso STG_UNUSED,
                                           uint32_t new_cap STG_UNUSED)
{
    traceSchedEvent(cap, EVENT_MIGRATE_THREAD, tso, new_cap);
    dtraceMigrateThread((EventCapNo)cap->no, (EventThreadID)tso->id,
                        (EventCapNo)new_cap);
}

INLINE_HEADER void traceCapCreate(Capability *cap STG_UNUSED)
{
    traceCapEvent(cap, EVENT_CAP_CREATE);
    dtraceCapCreate((EventCapNo)cap->no);
}

INLINE_HEADER void traceCapDelete(Capability *cap STG_UNUSED)
{
    traceCapEvent(cap, EVENT_CAP_DELETE);
    dtraceCapDelete((EventCapNo)cap->no);
}

INLINE_HEADER void traceCapEnable(Capability *cap STG_UNUSED)
{
    traceCapEvent(cap, EVENT_CAP_ENABLE);
    dtraceCapEnable((EventCapNo)cap->no);
}

INLINE_HEADER void traceCapDisable(Capability *cap STG_UNUSED)
{
    traceCapEvent(cap, EVENT_CAP_DISABLE);
    dtraceCapDisable((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventThreadWakeup(Capability *cap STG_UNUSED,
                                          StgTSO *tso STG_UNUSED,
                                          uint32_t other_cap STG_UNUSED)
{
    traceSchedEvent(cap, EVENT_THREAD_WAKEUP, tso, other_cap);
    dtraceThreadWakeup((EventCapNo)cap->no, (EventThreadID)tso->id,
                       (EventCapNo)other_cap);
}

INLINE_HEADER void traceThreadLabel(Capability *cap STG_UNUSED,
                                    StgTSO *tso STG_UNUSED,
                                    char *label STG_UNUSED)
{
    if (RTS_UNLIKELY(TRACE_sched)) {
        traceThreadLabel_(cap, tso, label);
    }
    dtraceThreadLabel((EventCapNo)cap->no, (EventThreadID)tso->id, label);
}

INLINE_HEADER void traceEventGcStart(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_START);
    dtraceGcStart((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcStartAtT(Capability *cap STG_UNUSED,
                                        StgWord64 ts STG_UNUSED)
{
    traceGcEventAtT(cap, ts, EVENT_GC_START);
    dtraceGcStart((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcEnd(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_END);
    dtraceGcEnd((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcEndAtT(Capability *cap STG_UNUSED,
                                      StgWord64 ts STG_UNUSED)
{
    traceGcEventAtT(cap, ts, EVENT_GC_END);
    dtraceGcEnd((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventRequestSeqGc(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_REQUEST_SEQ_GC);
    dtraceRequestSeqGc((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventRequestParGc(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_REQUEST_PAR_GC);
    dtraceRequestParGc((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcIdle(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_IDLE);
    dtraceGcIdle((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcWork(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_WORK);
    dtraceGcWork((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcDone(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_DONE);
    dtraceGcDone((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcGlobalSync(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_GLOBAL_SYNC);
    dtraceGcGlobalSync((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcStats(Capability *cap STG_UNUSED,
                                     CapsetID heap_capset STG_UNUSED,
                                     uint32_t gen STG_UNUSED,
                                     W_ copied STG_UNUSED,
                                     W_ slop STG_UNUSED,
                                     W_ fragmentation STG_UNUSED,
                                     uint32_t par_n_threads STG_UNUSED,
                                     W_ par_max_copied STG_UNUSED,
                                     W_ par_tot_copied STG_UNUSED,
                                     W_ par_balanced_copied STG_UNUSED)
{
    if (RTS_UNLIKELY(TRACE_gc)) {
        traceEventGcStats_(cap, heap_capset, gen,
                           copied, slop, fragmentation,
                           par_n_threads, par_max_copied,
                           par_tot_copied, par_balanced_copied);
    }
    dtraceEventGcStats(heap_capset, gen,
                       copied, slop, fragmentation,
                       par_n_threads, par_max_copied,
                       par_tot_copied, par_balanced_copied);
}

INLINE_HEADER void traceEventHeapInfo(CapsetID heap_capset STG_UNUSED,
                                      uint32_t gens STG_UNUSED,
                                      W_ maxHeapSize STG_UNUSED,
                                      W_ allocAreaSize STG_UNUSED,
                                      W_ mblockSize STG_UNUSED,
                                      W_ blockSize STG_UNUSED)
{
    if (RTS_UNLIKELY(TRACE_gc)) {
        traceEventHeapInfo_(heap_capset, gens,
                            maxHeapSize, allocAreaSize,
                            mblockSize, blockSize);
    }
    dtraceHeapInfo(heap_capset, gens,
                   maxHeapSize, allocAreaSize,
                   mblockSize, blockSize);
}

INLINE_HEADER void traceEventHeapAllocated(Capability *cap STG_UNUSED,
                                           CapsetID heap_capset STG_UNUSED,
                                           W_ allocated STG_UNUSED)
{
    traceHeapEvent(cap, EVENT_HEAP_ALLOCATED, heap_capset, allocated);
    dtraceEventHeapAllocated((EventCapNo)cap->no, heap_capset, allocated);
}

INLINE_HEADER void traceEventHeapSize(Capability *cap STG_UNUSED,
                                      CapsetID heap_capset STG_UNUSED,
                                      W_ heap_size STG_UNUSED)
{
    traceHeapEvent(cap, EVENT_HEAP_SIZE, heap_capset, heap_size);
    dtraceEventHeapSize(heap_capset, heap_size);
}

INLINE_HEADER void traceEventHeapLive(Capability *cap STG_UNUSED,
                                      CapsetID heap_capset STG_UNUSED,
                                      W_ heap_live STG_UNUSED)
{
    traceHeapEvent(cap, EVENT_HEAP_LIVE, heap_capset, heap_live);
    dtraceEventHeapLive(heap_capset, heap_live);
}

INLINE_HEADER void traceCapsetCreate(CapsetID capset STG_UNUSED,
                                     CapsetType capset_type STG_UNUSED)
{
    traceCapsetEvent(EVENT_CAPSET_CREATE, capset, capset_type);
    dtraceCapsetCreate(capset, capset_type);
}

INLINE_HEADER void traceCapsetDelete(CapsetID capset STG_UNUSED)
{
    traceCapsetEvent(EVENT_CAPSET_DELETE, capset, 0);
    dtraceCapsetDelete(capset);
}

INLINE_HEADER void traceCapsetAssignCap(CapsetID capset STG_UNUSED,
                                        uint32_t capno STG_UNUSED)
{
    traceCapsetEvent(EVENT_CAPSET_ASSIGN_CAP, capset, capno);
    dtraceCapsetAssignCap(capset, capno);
}

INLINE_HEADER void traceCapsetRemoveCap(CapsetID capset STG_UNUSED,
                                        uint32_t capno STG_UNUSED)
{
    traceCapsetEvent(EVENT_CAPSET_REMOVE_CAP, capset, capno);
    dtraceCapsetRemoveCap(capset, capno);
}

INLINE_HEADER void traceWallClockTime(void)
{
    traceWallClockTime_();
    /* Note: no DTrace equivalent because it is available to DTrace directly */
}

INLINE_HEADER void traceOSProcessInfo(void)
{
    traceOSProcessInfo_();
    /* Note: no DTrace equivalent because all this OS process info
     * is available to DTrace directly */
}

INLINE_HEADER void traceEventCreateSparkThread(Capability *cap STG_UNUSED,
                                               StgThreadID spark_tid STG_UNUSED)
{
    traceSparkEvent2(cap, EVENT_CREATE_SPARK_THREAD, spark_tid);
    dtraceCreateSparkThread((EventCapNo)cap->no, (EventThreadID)spark_tid);
}

INLINE_HEADER void traceSparkCounters(Capability *cap STG_UNUSED)
{
#if defined(THREADED_RTS)
    if (RTS_UNLIKELY(TRACE_spark_sampled)) {
        traceSparkCounters_(cap, cap->spark_stats, sparkPoolSize(cap->sparks));
    }
    dtraceSparkCounters((EventCapNo)cap->no,
                        cap->spark_stats.created,
                        cap->spark_stats.dud,
                        cap->spark_stats.overflowed,
                        cap->spark_stats.converted,
                        cap->spark_stats.gcd,
                        cap->spark_stats.fizzled,
                        sparkPoolSize(cap->sparks));
#endif
}

INLINE_HEADER void traceEventSparkCreate(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_CREATE);
    dtraceSparkCreate((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkDud(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_DUD);
    dtraceSparkDud((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkOverflow(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_OVERFLOW);
    dtraceSparkOverflow((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkRun(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_RUN);
    dtraceSparkRun((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkSteal(Capability *cap STG_UNUSED,
                                        uint32_t victim_cap STG_UNUSED)
{
    traceSparkEvent2(cap, EVENT_SPARK_STEAL, victim_cap);
    dtraceSparkSteal((EventCapNo)cap->no, (EventCapNo)victim_cap);
}

INLINE_HEADER void traceEventSparkFizzle(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_FIZZLE);
    dtraceSparkFizzle((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkGC(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_GC);
    dtraceSparkGc((EventCapNo)cap->no);
}

INLINE_HEADER void traceTaskCreate(Task *task STG_UNUSED,
                                   Capability *cap STG_UNUSED)
{
    ASSERT(task->cap == cap);
    // TODO: asserting task->cap == NULL would be much stronger
    // (the intention being that the task structure has just been created
    // and is still empty), but that would require large changes to the
    // traceTaskCreate calls.
    ASSERT(cap != NULL);
    // A new task gets associated with a cap. We also record the kernel
    // thread id of the task, which should never change.
    if (RTS_UNLIKELY(TRACE_sched)) {
        traceTaskCreate_(task, cap);
    }
    dtraceTaskCreate(serialisableTaskId(task),
                     (EventCapNo)cap->no,
                     kernelThreadId());
}

INLINE_HEADER void traceTaskMigrate(Task *task STG_UNUSED,
                                    Capability *cap STG_UNUSED,
                                    Capability *new_cap STG_UNUSED)
{
    ASSERT(task->cap == cap);
    ASSERT(cap != NULL);
    ASSERT(cap != new_cap);
    ASSERT(new_cap != NULL);
    // A task migrates from one cap to another.
    if (RTS_UNLIKELY(TRACE_sched)) {
        traceTaskMigrate_(task, cap, new_cap);
    }
    dtraceTaskMigrate(serialisableTaskId(task), (EventCapNo)cap->no,
                      (EventCapNo)new_cap->no);
}

INLINE_HEADER void traceTaskDelete(Task *task STG_UNUSED)
{
    ASSERT(task->cap != NULL);
    if (RTS_UNLIKELY(TRACE_sched)) {
        traceTaskDelete_(task);
    }
    dtraceTaskDelete(serialisableTaskId(task));
}

#include "EndPrivate.h"