/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2008-2009
 *
 * Support for fast binary event logging and user-space dtrace probes.
 *
 * ---------------------------------------------------------------------------*/

#pragma once

#include "rts/EventLogFormat.h"
#include "sm/NonMovingCensus.h"
#include "Capability.h"

#if defined(DTRACE)
#include "RtsProbes.h"
#endif /* defined(DTRACE) */

#include "BeginPrivate.h"

// -----------------------------------------------------------------------------
// EventLog API
// -----------------------------------------------------------------------------

#if defined(TRACING)

void initTracing (void);
void endTracing (void);
void freeTracing (void);
void resetTracing (void);
void tracingAddCapapilities (uint32_t from, uint32_t to);

#endif /* TRACING */

typedef StgWord32 CapsetID;
typedef StgWord16 CapsetType;
enum CapsetType { CapsetTypeCustom      = CAPSET_TYPE_CUSTOM,
                  CapsetTypeOsProcess   = CAPSET_TYPE_OSPROCESS,
                  CapsetTypeClockdomain = CAPSET_TYPE_CLOCKDOMAIN };
#define CAPSET_OSPROCESS_DEFAULT   ((CapsetID)0)
#define CAPSET_HEAP_DEFAULT        ((CapsetID)0) /* reusing the same capset */
#define CAPSET_CLOCKDOMAIN_DEFAULT ((CapsetID)1)

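/*
 * Illustrative sketch (the call sites shown here are assumptions, not taken
 * from this file): the default capability sets above are meant to be created
 * via the inline probes declared further down, roughly as
 *
 *   traceCapsetCreate(CAPSET_OSPROCESS_DEFAULT, CapsetTypeOsProcess);
 *   traceCapsetCreate(CAPSET_CLOCKDOMAIN_DEFAULT, CapsetTypeClockdomain);
 *
 * pairing each CapsetID with the matching CapsetType from the enum above.
 */
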
// -----------------------------------------------------------------------------
// Message classes
// -----------------------------------------------------------------------------

// shorthand for RtsFlags.DebugFlags.<blah>, useful with debugTrace()
#define DEBUG_sched        RtsFlags.DebugFlags.scheduler
#define DEBUG_interp       RtsFlags.DebugFlags.interp
#define DEBUG_weak         RtsFlags.DebugFlags.weak
#define DEBUG_gccafs       RtsFlags.DebugFlags.gccafs
#define DEBUG_gc           RtsFlags.DebugFlags.gc
#define DEBUG_nonmoving_gc RtsFlags.DebugFlags.nonmoving_gc
#define DEBUG_block_alloc  RtsFlags.DebugFlags.alloc
#define DEBUG_sanity       RtsFlags.DebugFlags.sanity
#define DEBUG_stable       RtsFlags.DebugFlags.stable
#define DEBUG_stm          RtsFlags.DebugFlags.stm
#define DEBUG_prof         RtsFlags.DebugFlags.prof
#define DEBUG_gran         RtsFlags.DebugFlags.gran
#define DEBUG_par          RtsFlags.DebugFlags.par
#define DEBUG_linker       RtsFlags.DebugFlags.linker
#define DEBUG_squeeze      RtsFlags.DebugFlags.squeeze
#define DEBUG_hpc          RtsFlags.DebugFlags.hpc
#define DEBUG_sparks       RtsFlags.DebugFlags.sparks
#define DEBUG_compact      RtsFlags.DebugFlags.compact

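/*
 * Illustrative sketch: the DEBUG_* shorthands above are intended as the
 * message-class argument of debugTrace() (defined below).  For example,
 * assuming a TSO pointer `tso` is in scope, scheduler code might write
 *
 *   debugTrace(DEBUG_sched, "waking up thread %ld", (long)tso->id);
 *
 * The message is only emitted in a DEBUG-enabled RTS, and only when the
 * corresponding debug class has been switched on with a +RTS -D flag.
 */
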
// events
extern int TRACE_sched;
extern int TRACE_gc;
extern int TRACE_spark_sampled;
extern int TRACE_spark_full;
/* extern int TRACE_user; */  // only used in Trace.c
extern int TRACE_cap;
extern int TRACE_nonmoving_gc;

// -----------------------------------------------------------------------------
// Posting events
//
// We use macros rather than inline functions deliberately: we want the
// not-taken case to be as cheap as possible, a simple test-and-jump, and
// with inline functions gcc seemed to move some of the instructions from
// the branch up before the test.
//
// -----------------------------------------------------------------------------

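/*
 * Illustrative sketch of the point above: a call such as
 *
 *   traceSchedEvent(cap, EVENT_RUN_THREAD, tso, tso->what_next);
 *
 * expands to
 *
 *   if (RTS_UNLIKELY(TRACE_sched)) {
 *       traceSchedEvent_(cap, EVENT_RUN_THREAD, tso, tso->what_next, 0);
 *   }
 *
 * so when the event class is disabled the whole thing costs a single
 * test-and-jump and none of the event arguments are evaluated.
 */
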
#if defined(DEBUG)
void traceBegin (const char *str, ...);
void traceEnd (void);
#endif

#if defined(TRACING)

/*
 * Record a scheduler event
 */
#define traceSchedEvent(cap, tag, tso, other)           \
    if (RTS_UNLIKELY(TRACE_sched)) {                    \
        traceSchedEvent_(cap, tag, tso, other, 0);      \
    }

#define traceSchedEvent2(cap, tag, tso, info1, info2)   \
    if (RTS_UNLIKELY(TRACE_sched)) {                    \
        traceSchedEvent_(cap, tag, tso, info1, info2);  \
    }

void traceSchedEvent_ (Capability *cap, EventTypeNum tag,
                       StgTSO *tso, StgWord info1, StgWord info2);

/*
 * Record a GC event
 */
#define traceGcEvent(cap, tag)                  \
    if (RTS_UNLIKELY(TRACE_gc)) {               \
        traceGcEvent_(cap, tag);                \
    }

void traceGcEvent_ (Capability *cap, EventTypeNum tag);

/*
 * Record a GC event at the explicitly given timestamp
 */
#define traceGcEventAtT(cap, ts, tag)           \
    if (RTS_UNLIKELY(TRACE_gc)) {               \
        traceGcEventAtT_(cap, ts, tag);         \
    }

void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag);

/*
 * Record a heap event
 */
#define traceHeapEvent(cap, tag, heap_capset, info1)    \
    if (RTS_UNLIKELY(TRACE_gc)) {                       \
        traceHeapEvent_(cap, tag, heap_capset, info1);  \
    }
void traceHeapEvent_ (Capability *cap,
                      EventTypeNum tag,
                      CapsetID heap_capset,
                      W_ info1);

void traceEventHeapInfo_ (CapsetID heap_capset,
                          uint32_t gens,
                          W_ maxHeapSize,
                          W_ allocAreaSize,
                          W_ mblockSize,
                          W_ blockSize);

void traceEventGcStats_ (Capability *cap,
                         CapsetID heap_capset,
                         uint32_t gen,
                         W_ copied,
                         W_ slop,
                         W_ fragmentation,
                         uint32_t par_n_threads,
                         W_ par_max_copied,
                         W_ par_tot_copied,
                         W_ par_balanced_copied);

/*
 * Record a spark event
 */
#define traceSparkEvent(cap, tag)               \
    if (RTS_UNLIKELY(TRACE_spark_full)) {       \
        traceSparkEvent_(cap, tag, 0);          \
    }

#define traceSparkEvent2(cap, tag, other)       \
    if (RTS_UNLIKELY(TRACE_spark_full)) {       \
        traceSparkEvent_(cap, tag, other);      \
    }

void traceSparkEvent_ (Capability *cap, EventTypeNum tag, StgWord info1);

// Variadic macros are C99, and supported by gcc.  However, the
// ##__VA_ARGS__ syntax is a gcc extension which allows the variable
// argument list to be empty (see the gcc docs for details).

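/*
 * Illustrative sketch (the format arguments are made up): thanks to
 * ##__VA_ARGS__ the trailing argument list may be omitted entirely, so both
 * of the following are valid uses of the macros below:
 *
 *   traceCap(TRACE_gc, cap, "starting GC of generation %d", gen_no);
 *   traceCap(TRACE_gc, cap, "GC requested");
 */
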
/*
 * Emit a trace message on a particular Capability
 */
#define traceCap(class, cap, msg, ...)          \
    if (RTS_UNLIKELY(class)) {                  \
        traceCap_(cap, msg, ##__VA_ARGS__);     \
    }

void traceCap_(Capability *cap, char *msg, ...);

/*
 * Emit a trace message
 */
#define trace(class, msg, ...)                  \
    if (RTS_UNLIKELY(class)) {                  \
        trace_(msg, ##__VA_ARGS__);             \
    }

void trace_(char *msg, ...);

/*
 * A message or event emitted by the program
 * Used by Debug.Trace.{traceEvent, traceEventIO}
 */
void traceUserMsg(Capability *cap, char *msg);

/*
 * A marker event emitted by the program
 * Used by Debug.Trace.{traceMarker, traceMarkerIO}
 */
void traceUserMarker(Capability *cap, char *msg);

/*
 * A binary message or event emitted by the program
 */
void traceUserBinaryMsg(Capability *cap, uint8_t *msg, size_t size);

/*
 * An event to record a Haskell thread's label/name
 * Used by GHC.Conc.labelThread
 */
void traceThreadLabel_(Capability *cap,
                       StgTSO *tso,
                       char *label);

/*
 * Emit a debug message (only when DEBUG is defined)
 */
#if defined(DEBUG)
#define debugTrace(class, msg, ...)             \
    if (RTS_UNLIKELY(class)) {                  \
        trace_(msg, ##__VA_ARGS__);             \
    }
#else
#define debugTrace(class, str, ...) /* nothing */
#endif

#if defined(DEBUG)
#define debugTraceCap(class, cap, msg, ...)     \
    if (RTS_UNLIKELY(class)) {                  \
        traceCap_(cap, msg, ##__VA_ARGS__);     \
    }
#else
#define debugTraceCap(class, cap, str, ...) /* nothing */
#endif

/*
 * Emit a message/event describing the state of a thread
 */
#define traceThreadStatus(class, tso)           \
    if (RTS_UNLIKELY(class)) {                  \
        traceThreadStatus_(tso);                \
    }

void traceThreadStatus_ (StgTSO *tso);

/*
 * Events for describing capabilities and capability sets in the eventlog
 */
#define traceCapEvent(cap, tag)                 \
    if (RTS_UNLIKELY(TRACE_cap)) {              \
        traceCapEvent_(cap, tag);               \
    }

void traceCapEvent_ (Capability *cap,
                     EventTypeNum tag);

#define traceCapsetEvent(tag, capset, info)     \
    if (RTS_UNLIKELY(TRACE_cap)) {              \
        traceCapsetEvent_(tag, capset, info);   \
    }

void traceCapsetEvent_ (EventTypeNum tag,
                        CapsetID capset,
                        StgWord info);

void traceWallClockTime_(void);

void traceOSProcessInfo_ (void);

void traceSparkCounters_ (Capability *cap,
                          SparkCounters counters,
                          StgWord remaining);

void traceTaskCreate_ (Task *task,
                       Capability *cap);

void traceTaskMigrate_ (Task *task,
                        Capability *cap,
                        Capability *new_cap);

void traceTaskDelete_ (Task *task);

void traceHeapProfBegin(StgWord8 profile_id);
void traceHeapProfSampleBegin(StgInt era);
void traceHeapProfSampleEnd(StgInt era);
void traceHeapProfSampleString(StgWord8 profile_id,
                               const char *label, StgWord residency);
#if defined(PROFILING)
void traceHeapProfCostCentre(StgWord32 ccID,
                             const char *label,
                             const char *module,
                             const char *srcloc,
                             StgBool is_caf);
void traceHeapProfSampleCostCentre(StgWord8 profile_id,
                                   CostCentreStack *stack, StgWord residency);
#endif /* PROFILING */

void traceConcMarkBegin(void);
void traceConcMarkEnd(StgWord32 marked_obj_count);
void traceConcSyncBegin(void);
void traceConcSyncEnd(void);
void traceConcSweepBegin(void);
void traceConcSweepEnd(void);
void traceConcUpdRemSetFlush(Capability *cap);
void traceNonmovingHeapCensus(uint32_t log_blk_size,
                              const struct NonmovingAllocCensus *census);

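/*
 * Illustrative sketch (the helper and variable names here are assumptions,
 * not the actual call site): after the nonmoving collector takes a census
 * of one of its allocators it can publish the sample to the eventlog with
 * something like
 *
 *   struct NonmovingAllocCensus census = nonmovingAllocatorCensus(alloc);
 *   traceNonmovingHeapCensus(log_blk_size, &census);
 *
 * where log_blk_size identifies the allocator's block-size class.
 */
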
void flushTrace(void);

#else /* !TRACING */

#define traceSchedEvent(cap, tag, tso, other) /* nothing */
#define traceSchedEvent2(cap, tag, tso, other, info) /* nothing */
#define traceGcEvent(cap, tag) /* nothing */
#define traceGcEventAtT(cap, ts, tag) /* nothing */
#define traceEventGcStats_(cap, heap_capset, gen, \
                           copied, slop, fragmentation, \
                           par_n_threads, par_max_copied, \
                           par_tot_copied, par_balanced_copied) /* nothing */
#define traceHeapEvent(cap, tag, heap_capset, info1) /* nothing */
#define traceEventHeapInfo_(heap_capset, gens, \
                            maxHeapSize, allocAreaSize, \
                            mblockSize, blockSize) /* nothing */
#define traceSparkEvent(cap, tag) /* nothing */
#define traceSparkEvent2(cap, tag, other) /* nothing */
#define traceCap(class, cap, msg, ...) /* nothing */
#define trace(class, msg, ...) /* nothing */
#define debugTrace(class, str, ...) /* nothing */
#define debugTraceCap(class, cap, str, ...) /* nothing */
#define traceThreadStatus(class, tso) /* nothing */
#define traceThreadLabel_(cap, tso, label) /* nothing */
#define traceCapEvent(cap, tag) /* nothing */
#define traceCapsetEvent(tag, capset, info) /* nothing */
#define traceWallClockTime_() /* nothing */
#define traceOSProcessInfo_() /* nothing */
#define traceSparkCounters_(cap, counters, remaining) /* nothing */
#define traceTaskCreate_(taskID, cap) /* nothing */
#define traceTaskMigrate_(taskID, cap, new_cap) /* nothing */
#define traceTaskDelete_(taskID) /* nothing */
#define traceHeapProfBegin(profile_id) /* nothing */
#define traceHeapProfCostCentre(ccID, label, module, srcloc, is_caf) /* nothing */
#define traceHeapProfSampleBegin(era) /* nothing */
#define traceHeapProfSampleEnd(era) /* nothing */
#define traceHeapProfSampleCostCentre(profile_id, stack, residency) /* nothing */
#define traceHeapProfSampleString(profile_id, label, residency) /* nothing */

#define traceConcMarkBegin() /* nothing */
#define traceConcMarkEnd(marked_obj_count) /* nothing */
#define traceConcSyncBegin() /* nothing */
#define traceConcSyncEnd() /* nothing */
#define traceConcSweepBegin() /* nothing */
#define traceConcSweepEnd() /* nothing */
#define traceConcUpdRemSetFlush(cap) /* nothing */
#define traceNonmovingHeapCensus(blk_size, census) /* nothing */

#define flushTrace() /* nothing */

#endif /* TRACING */

// If DTRACE is enabled, but neither DEBUG nor TRACING, we need a C land
// wrapper for the user-msg probe (as we can't expand that in PrimOps.cmm)
//
#if !defined(DEBUG) && !defined(TRACING) && defined(DTRACE)

void dtraceUserMsgWrapper(Capability *cap, char *msg);
void dtraceUserMarkerWrapper(Capability *cap, char *msg);

#endif /* !defined(DEBUG) && !defined(TRACING) && defined(DTRACE) */

// -----------------------------------------------------------------------------
// Aliases for static dtrace probes if dtrace is available
// -----------------------------------------------------------------------------

#if defined(DTRACE)

#define dtraceCreateThread(cap, tid)                    \
    HASKELLEVENT_CREATE_THREAD(cap, tid)
#define dtraceRunThread(cap, tid)                       \
    HASKELLEVENT_RUN_THREAD(cap, tid)
#define dtraceStopThread(cap, tid, status, info)        \
    HASKELLEVENT_STOP_THREAD(cap, tid, status, info)
#define dtraceThreadRunnable(cap, tid)                  \
    HASKELLEVENT_THREAD_RUNNABLE(cap, tid)
#define dtraceMigrateThread(cap, tid, new_cap)          \
    HASKELLEVENT_MIGRATE_THREAD(cap, tid, new_cap)
#define dtraceThreadWakeup(cap, tid, other_cap)         \
    HASKELLEVENT_THREAD_WAKEUP(cap, tid, other_cap)
#define dtraceGcStart(cap)                              \
    HASKELLEVENT_GC_START(cap)
#define dtraceGcEnd(cap)                                \
    HASKELLEVENT_GC_END(cap)
#define dtraceRequestSeqGc(cap)                         \
    HASKELLEVENT_REQUEST_SEQ_GC(cap)
#define dtraceRequestParGc(cap)                         \
    HASKELLEVENT_REQUEST_PAR_GC(cap)
#define dtraceCreateSparkThread(cap, spark_tid)         \
    HASKELLEVENT_CREATE_SPARK_THREAD(cap, spark_tid)
#define dtraceThreadLabel(cap, tso, label)              \
    HASKELLEVENT_THREAD_LABEL(cap, tso, label)
#define dtraceCapCreate(cap)                            \
    HASKELLEVENT_CAP_CREATE(cap)
#define dtraceCapDelete(cap)                            \
    HASKELLEVENT_CAP_DELETE(cap)
#define dtraceCapEnable(cap)                            \
    HASKELLEVENT_CAP_ENABLE(cap)
#define dtraceCapDisable(cap)                           \
    HASKELLEVENT_CAP_DISABLE(cap)
#define dtraceUserMsg(cap, msg)                         \
    HASKELLEVENT_USER_MSG(cap, msg)
#define dtraceUserMarker(cap, msg)                      \
    HASKELLEVENT_USER_MARKER(cap, msg)
#define dtraceGcIdle(cap)                               \
    HASKELLEVENT_GC_IDLE(cap)
#define dtraceGcWork(cap)                               \
    HASKELLEVENT_GC_WORK(cap)
#define dtraceGcDone(cap)                               \
    HASKELLEVENT_GC_DONE(cap)
#define dtraceGcGlobalSync(cap)                         \
    HASKELLEVENT_GC_GLOBAL_SYNC(cap)
#define dtraceEventGcStats(heap_capset, gens,           \
                           copies, slop, fragmentation, \
                           par_n_threads,               \
                           par_max_copied,              \
                           par_tot_copied,              \
                           par_balanced_copied)         \
    HASKELLEVENT_GC_STATS(heap_capset, gens,            \
                          copies, slop, fragmentation,  \
                          par_n_threads,                \
                          par_max_copied,               \
                          par_balanced_copied,          \
                          par_tot_copied)
#define dtraceHeapInfo(heap_capset, gens,               \
                       maxHeapSize, allocAreaSize,      \
                       mblockSize, blockSize)           \
    HASKELLEVENT_HEAP_INFO(heap_capset, gens,           \
                           maxHeapSize, allocAreaSize,  \
                           mblockSize, blockSize)
#define dtraceEventHeapAllocated(cap, heap_capset, allocated) \
    HASKELLEVENT_HEAP_ALLOCATED(cap, heap_capset, allocated)
#define dtraceEventHeapSize(heap_capset, size)          \
    HASKELLEVENT_HEAP_SIZE(heap_capset, size)
#define dtraceEventHeapLive(heap_capset, live)          \
    HASKELLEVENT_HEAP_LIVE(heap_capset, live)
#define dtraceCapsetCreate(capset, capset_type)         \
    HASKELLEVENT_CAPSET_CREATE(capset, capset_type)
#define dtraceCapsetDelete(capset)                      \
    HASKELLEVENT_CAPSET_DELETE(capset)
#define dtraceCapsetAssignCap(capset, capno)            \
    HASKELLEVENT_CAPSET_ASSIGN_CAP(capset, capno)
#define dtraceCapsetRemoveCap(capset, capno)            \
    HASKELLEVENT_CAPSET_REMOVE_CAP(capset, capno)
#define dtraceSparkCounters(cap, a, b, c, d, e, f, g)   \
    HASKELLEVENT_SPARK_COUNTERS(cap, a, b, c, d, e, f, g)
#define dtraceSparkCreate(cap)                          \
    HASKELLEVENT_SPARK_CREATE(cap)
#define dtraceSparkDud(cap)                             \
    HASKELLEVENT_SPARK_DUD(cap)
#define dtraceSparkOverflow(cap)                        \
    HASKELLEVENT_SPARK_OVERFLOW(cap)
#define dtraceSparkRun(cap)                             \
    HASKELLEVENT_SPARK_RUN(cap)
#define dtraceSparkSteal(cap, victim_cap)               \
    HASKELLEVENT_SPARK_STEAL(cap, victim_cap)
#define dtraceSparkFizzle(cap)                          \
    HASKELLEVENT_SPARK_FIZZLE(cap)
#define dtraceSparkGc(cap)                              \
    HASKELLEVENT_SPARK_GC(cap)
#define dtraceTaskCreate(taskID, cap, tid)              \
    HASKELLEVENT_TASK_CREATE(taskID, cap, tid)
#define dtraceTaskMigrate(taskID, cap, new_cap)         \
    HASKELLEVENT_TASK_MIGRATE(taskID, cap, new_cap)
#define dtraceTaskDelete(taskID)                        \
    HASKELLEVENT_TASK_DELETE(taskID)

#else /* !defined(DTRACE) */

#define dtraceCreateThread(cap, tid) /* nothing */
#define dtraceRunThread(cap, tid) /* nothing */
#define dtraceStopThread(cap, tid, status, info) /* nothing */
#define dtraceThreadRunnable(cap, tid) /* nothing */
#define dtraceMigrateThread(cap, tid, new_cap) /* nothing */
#define dtraceThreadWakeup(cap, tid, other_cap) /* nothing */
#define dtraceGcStart(cap) /* nothing */
#define dtraceGcEnd(cap) /* nothing */
#define dtraceRequestSeqGc(cap) /* nothing */
#define dtraceRequestParGc(cap) /* nothing */
#define dtraceCreateSparkThread(cap, spark_tid) /* nothing */
#define dtraceThreadLabel(cap, tso, label) /* nothing */
#define dtraceUserMsg(cap, msg) /* nothing */
#define dtraceUserMarker(cap, msg) /* nothing */
#define dtraceGcIdle(cap) /* nothing */
#define dtraceGcWork(cap) /* nothing */
#define dtraceGcDone(cap) /* nothing */
#define dtraceGcGlobalSync(cap) /* nothing */
#define dtraceEventGcStats(heap_capset, gens,           \
                           copies, slop, fragmentation, \
                           par_n_threads,               \
                           par_max_copied,              \
                           par_tot_copied,              \
                           par_balanced_copied) /* nothing */
#define dtraceHeapInfo(heap_capset, gens,               \
                       maxHeapSize, allocAreaSize,      \
                       mblockSize, blockSize) /* nothing */
#define dtraceEventHeapAllocated(cap, heap_capset, allocated) /* nothing */
#define dtraceEventHeapSize(heap_capset, size) /* nothing */
#define dtraceEventHeapLive(heap_capset, live) /* nothing */
#define dtraceCapCreate(cap) /* nothing */
#define dtraceCapDelete(cap) /* nothing */
#define dtraceCapEnable(cap) /* nothing */
#define dtraceCapDisable(cap) /* nothing */
#define dtraceCapsetCreate(capset, capset_type) /* nothing */
#define dtraceCapsetDelete(capset) /* nothing */
#define dtraceCapsetAssignCap(capset, capno) /* nothing */
#define dtraceCapsetRemoveCap(capset, capno) /* nothing */
#define dtraceSparkCounters(cap, a, b, c, d, e, f, g) /* nothing */
#define dtraceSparkCreate(cap) /* nothing */
#define dtraceSparkDud(cap) /* nothing */
#define dtraceSparkOverflow(cap) /* nothing */
#define dtraceSparkRun(cap) /* nothing */
#define dtraceSparkSteal(cap, victim_cap) /* nothing */
#define dtraceSparkFizzle(cap) /* nothing */
#define dtraceSparkGc(cap) /* nothing */
#define dtraceTaskCreate(taskID, cap, tid) /* nothing */
#define dtraceTaskMigrate(taskID, cap, new_cap) /* nothing */
#define dtraceTaskDelete(taskID) /* nothing */

#endif

// -----------------------------------------------------------------------------
// Trace probes dispatching to various tracing frameworks
//
// To avoid accumulating multiple tracing calls at each trace point, we define
// inline probe functions that contain the various invocations.
//
// Dtrace - dtrace probes are unconditionally added, as probe activation is
// handled by the dtrace component of the kernel and inactive probes are very
// cheap - usually a single no-op.  Consequently, dtrace can be used with all
// flavours of the RTS.  In addition, we still support logging events to a
// file even in the presence of dtrace.  This is useful, e.g., when tracing on
// a server but browsing the trace information with ThreadScope on a local
// client.
//
// -----------------------------------------------------------------------------

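/*
 * Illustrative sketch: RTS code calls these probes unconditionally at trace
 * points, e.g. the scheduler simply writes
 *
 *   traceEventRunThread(cap, t);
 *
 * (with t the TSO about to run) and the probe forwards the event to the
 * eventlog, when TRACING is enabled and the class is switched on, and to the
 * corresponding dtrace probe, when DTRACE is available; otherwise it compiles
 * to nothing.
 */
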
INLINE_HEADER void traceEventCreateThread(Capability *cap STG_UNUSED,
                                          StgTSO *tso STG_UNUSED)
{
    traceSchedEvent(cap, EVENT_CREATE_THREAD, tso, tso->stackobj->stack_size);
    dtraceCreateThread((EventCapNo)cap->no, (EventThreadID)tso->id);
}

INLINE_HEADER void traceEventRunThread(Capability *cap STG_UNUSED,
                                       StgTSO *tso STG_UNUSED)
{
    traceSchedEvent(cap, EVENT_RUN_THREAD, tso, tso->what_next);
    dtraceRunThread((EventCapNo)cap->no, (EventThreadID)tso->id);
}

INLINE_HEADER void traceEventStopThread(Capability *cap STG_UNUSED,
                                        StgTSO *tso STG_UNUSED,
                                        StgThreadReturnCode status STG_UNUSED,
                                        StgWord32 info STG_UNUSED)
{
    traceSchedEvent2(cap, EVENT_STOP_THREAD, tso, status, info);
    dtraceStopThread((EventCapNo)cap->no, (EventThreadID)tso->id,
                     (EventThreadStatus)status, (EventThreadID)info);
}

INLINE_HEADER void traceEventMigrateThread(Capability *cap STG_UNUSED,
                                           StgTSO *tso STG_UNUSED,
                                           uint32_t new_cap STG_UNUSED)
{
    traceSchedEvent(cap, EVENT_MIGRATE_THREAD, tso, new_cap);
    dtraceMigrateThread((EventCapNo)cap->no, (EventThreadID)tso->id,
                        (EventCapNo)new_cap);
}

INLINE_HEADER void traceCapCreate(Capability *cap STG_UNUSED)
{
    traceCapEvent(cap, EVENT_CAP_CREATE);
    dtraceCapCreate((EventCapNo)cap->no);
}

INLINE_HEADER void traceCapDelete(Capability *cap STG_UNUSED)
{
    traceCapEvent(cap, EVENT_CAP_DELETE);
    dtraceCapDelete((EventCapNo)cap->no);
}

INLINE_HEADER void traceCapEnable(Capability *cap STG_UNUSED)
{
    traceCapEvent(cap, EVENT_CAP_ENABLE);
    dtraceCapEnable((EventCapNo)cap->no);
}

INLINE_HEADER void traceCapDisable(Capability *cap STG_UNUSED)
{
    traceCapEvent(cap, EVENT_CAP_DISABLE);
    dtraceCapDisable((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventThreadWakeup(Capability *cap STG_UNUSED,
                                          StgTSO *tso STG_UNUSED,
                                          uint32_t other_cap STG_UNUSED)
{
    traceSchedEvent(cap, EVENT_THREAD_WAKEUP, tso, other_cap);
    dtraceThreadWakeup((EventCapNo)cap->no, (EventThreadID)tso->id,
                       (EventCapNo)other_cap);
}

INLINE_HEADER void traceThreadLabel(Capability *cap STG_UNUSED,
                                    StgTSO *tso STG_UNUSED,
                                    char *label STG_UNUSED)
{
    if (RTS_UNLIKELY(TRACE_sched)) {
        traceThreadLabel_(cap, tso, label);
    }
    dtraceThreadLabel((EventCapNo)cap->no, (EventThreadID)tso->id, label);
}

INLINE_HEADER void traceEventGcStart(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_START);
    dtraceGcStart((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcStartAtT(Capability *cap STG_UNUSED,
                                        StgWord64 ts STG_UNUSED)
{
    traceGcEventAtT(cap, ts, EVENT_GC_START);
    dtraceGcStart((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcEnd(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_END);
    dtraceGcEnd((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcEndAtT(Capability *cap STG_UNUSED,
                                      StgWord64 ts STG_UNUSED)
{
    traceGcEventAtT(cap, ts, EVENT_GC_END);
    dtraceGcEnd((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventRequestSeqGc(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_REQUEST_SEQ_GC);
    dtraceRequestSeqGc((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventRequestParGc(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_REQUEST_PAR_GC);
    dtraceRequestParGc((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcIdle(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_IDLE);
    dtraceGcIdle((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcWork(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_WORK);
    dtraceGcWork((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcDone(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_DONE);
    dtraceGcDone((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcGlobalSync(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_GLOBAL_SYNC);
    dtraceGcGlobalSync((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcStats(Capability *cap STG_UNUSED,
                                     CapsetID heap_capset STG_UNUSED,
                                     uint32_t gen STG_UNUSED,
                                     W_ copied STG_UNUSED,
                                     W_ slop STG_UNUSED,
                                     W_ fragmentation STG_UNUSED,
                                     uint32_t par_n_threads STG_UNUSED,
                                     W_ par_max_copied STG_UNUSED,
                                     W_ par_tot_copied STG_UNUSED,
                                     W_ par_balanced_copied STG_UNUSED)
{
    if (RTS_UNLIKELY(TRACE_gc)) {
        traceEventGcStats_(cap, heap_capset, gen,
                           copied, slop, fragmentation,
                           par_n_threads, par_max_copied,
                           par_tot_copied, par_balanced_copied);
    }
    dtraceEventGcStats(heap_capset, gen,
                       copied, slop, fragmentation,
                       par_n_threads, par_max_copied,
                       par_tot_copied, par_balanced_copied);
}

INLINE_HEADER void traceEventHeapInfo(CapsetID heap_capset STG_UNUSED,
                                      uint32_t gens STG_UNUSED,
                                      W_ maxHeapSize STG_UNUSED,
                                      W_ allocAreaSize STG_UNUSED,
                                      W_ mblockSize STG_UNUSED,
                                      W_ blockSize STG_UNUSED)
{
    if (RTS_UNLIKELY(TRACE_gc)) {
        traceEventHeapInfo_(heap_capset, gens,
                            maxHeapSize, allocAreaSize,
                            mblockSize, blockSize);
    }
    dtraceHeapInfo(heap_capset, gens,
                   maxHeapSize, allocAreaSize,
                   mblockSize, blockSize);
}

INLINE_HEADER void traceEventHeapAllocated(Capability *cap STG_UNUSED,
                                           CapsetID heap_capset STG_UNUSED,
                                           W_ allocated STG_UNUSED)
{
    traceHeapEvent(cap, EVENT_HEAP_ALLOCATED, heap_capset, allocated);
    dtraceEventHeapAllocated((EventCapNo)cap->no, heap_capset, allocated);
}

INLINE_HEADER void traceEventHeapSize(Capability *cap STG_UNUSED,
                                      CapsetID heap_capset STG_UNUSED,
                                      W_ heap_size STG_UNUSED)
{
    traceHeapEvent(cap, EVENT_HEAP_SIZE, heap_capset, heap_size);
    dtraceEventHeapSize(heap_capset, heap_size);
}

INLINE_HEADER void traceEventHeapLive(Capability *cap STG_UNUSED,
                                      CapsetID heap_capset STG_UNUSED,
                                      W_ heap_live STG_UNUSED)
{
    traceHeapEvent(cap, EVENT_HEAP_LIVE, heap_capset, heap_live);
    dtraceEventHeapLive(heap_capset, heap_live);
}

INLINE_HEADER void traceCapsetCreate(CapsetID capset STG_UNUSED,
                                     CapsetType capset_type STG_UNUSED)
{
    traceCapsetEvent(EVENT_CAPSET_CREATE, capset, capset_type);
    dtraceCapsetCreate(capset, capset_type);
}

INLINE_HEADER void traceCapsetDelete(CapsetID capset STG_UNUSED)
{
    traceCapsetEvent(EVENT_CAPSET_DELETE, capset, 0);
    dtraceCapsetDelete(capset);
}

INLINE_HEADER void traceCapsetAssignCap(CapsetID capset STG_UNUSED,
                                        uint32_t capno STG_UNUSED)
{
    traceCapsetEvent(EVENT_CAPSET_ASSIGN_CAP, capset, capno);
    dtraceCapsetAssignCap(capset, capno);
}

INLINE_HEADER void traceCapsetRemoveCap(CapsetID capset STG_UNUSED,
                                        uint32_t capno STG_UNUSED)
{
    traceCapsetEvent(EVENT_CAPSET_REMOVE_CAP, capset, capno);
    dtraceCapsetRemoveCap(capset, capno);
}

INLINE_HEADER void traceWallClockTime(void)
{
    traceWallClockTime_();
    /* Note: no DTrace equivalent because it is available to DTrace directly */
}

INLINE_HEADER void traceOSProcessInfo(void)
{
    traceOSProcessInfo_();
    /* Note: no DTrace equivalent because all this OS process info
     * is available to DTrace directly */
}

INLINE_HEADER void traceEventCreateSparkThread(Capability *cap STG_UNUSED,
                                               StgThreadID spark_tid STG_UNUSED)
{
    traceSparkEvent2(cap, EVENT_CREATE_SPARK_THREAD, spark_tid);
    dtraceCreateSparkThread((EventCapNo)cap->no, (EventThreadID)spark_tid);
}

INLINE_HEADER void traceSparkCounters(Capability *cap STG_UNUSED)
{
#if defined(THREADED_RTS)
    if (RTS_UNLIKELY(TRACE_spark_sampled)) {
        traceSparkCounters_(cap, cap->spark_stats, sparkPoolSize(cap->sparks));
    }
    dtraceSparkCounters((EventCapNo)cap->no,
                        cap->spark_stats.created,
                        cap->spark_stats.dud,
                        cap->spark_stats.overflowed,
                        cap->spark_stats.converted,
                        cap->spark_stats.gcd,
                        cap->spark_stats.fizzled,
                        sparkPoolSize(cap->sparks));
#endif
}

INLINE_HEADER void traceEventSparkCreate(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_CREATE);
    dtraceSparkCreate((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkDud(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_DUD);
    dtraceSparkDud((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkOverflow(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_OVERFLOW);
    dtraceSparkOverflow((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkRun(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_RUN);
    dtraceSparkRun((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkSteal(Capability *cap STG_UNUSED,
                                        uint32_t victim_cap STG_UNUSED)
{
    traceSparkEvent2(cap, EVENT_SPARK_STEAL, victim_cap);
    dtraceSparkSteal((EventCapNo)cap->no, (EventCapNo)victim_cap);
}

INLINE_HEADER void traceEventSparkFizzle(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_FIZZLE);
    dtraceSparkFizzle((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkGC(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_GC);
    dtraceSparkGc((EventCapNo)cap->no);
}

INLINE_HEADER void traceTaskCreate(Task *task STG_UNUSED,
                                   Capability *cap STG_UNUSED)
{
    ASSERT(task->cap == cap);
    // TODO: asserting task->cap == NULL would be much stronger
    // (the intention being that the task structure is just created and empty)
    // but would require large changes to the traceTaskCreate calls.
    ASSERT(cap != NULL);
    // A new task gets associated with a cap.  We also record
    // the kernel thread id of the task, which should never change.
    if (RTS_UNLIKELY(TRACE_sched)) {
        traceTaskCreate_(task, cap);
    }
    dtraceTaskCreate(serialisableTaskId(task),
                     (EventCapNo)cap->no,
                     kernelThreadId());
}

INLINE_HEADER void traceTaskMigrate(Task *task STG_UNUSED,
                                    Capability *cap STG_UNUSED,
                                    Capability *new_cap STG_UNUSED)
{
    ASSERT(task->cap == cap);
    ASSERT(cap != NULL);
    ASSERT(cap != new_cap);
    ASSERT(new_cap != NULL);
    // A task migrates from one cap to another.
    if (RTS_UNLIKELY(TRACE_sched)) {
        traceTaskMigrate_(task, cap, new_cap);
    }
    dtraceTaskMigrate(serialisableTaskId(task), (EventCapNo)cap->no,
                      (EventCapNo)new_cap->no);
}

INLINE_HEADER void traceTaskDelete(Task *task STG_UNUSED)
{
    ASSERT(task->cap != NULL);
    if (RTS_UNLIKELY(TRACE_sched)) {
        traceTaskDelete_(task);
    }
    dtraceTaskDelete(serialisableTaskId(task));
}

#include "EndPrivate.h"