/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2008-2009
 *
 * Support for fast binary event logging and user-space dtrace probes.
 *
 * ---------------------------------------------------------------------------*/

#pragma once

#include "rts/EventLogFormat.h"
#include "sm/NonMovingCensus.h"
#include "Capability.h"

#if defined(DTRACE)
#include "RtsProbes.h"
#endif /* defined(DTRACE) */

#include "BeginPrivate.h"

// -----------------------------------------------------------------------------
// EventLog API
// -----------------------------------------------------------------------------

#if defined(TRACING)

void initTracing (void);
void endTracing (void);
void freeTracing (void);
void resetTracing (void);
void tracingAddCapapilities (uint32_t from, uint32_t to);

#endif /* TRACING */

typedef StgWord32 CapsetID;
typedef StgWord16 CapsetType;
enum CapsetType { CapsetTypeCustom = CAPSET_TYPE_CUSTOM,
                  CapsetTypeOsProcess = CAPSET_TYPE_OSPROCESS,
                  CapsetTypeClockdomain = CAPSET_TYPE_CLOCKDOMAIN };
#define CAPSET_OSPROCESS_DEFAULT   ((CapsetID)0)
#define CAPSET_HEAP_DEFAULT        ((CapsetID)0) /* reusing the same capset */
#define CAPSET_CLOCKDOMAIN_DEFAULT ((CapsetID)1)

// -----------------------------------------------------------------------------
// Message classes
// -----------------------------------------------------------------------------

// shorthand for RtsFlags.DebugFlags.<blah>, useful with debugTrace()
#define DEBUG_sched        RtsFlags.DebugFlags.scheduler
#define DEBUG_interp       RtsFlags.DebugFlags.interp
#define DEBUG_weak         RtsFlags.DebugFlags.weak
#define DEBUG_gccafs       RtsFlags.DebugFlags.gccafs
#define DEBUG_gc           RtsFlags.DebugFlags.gc
#define DEBUG_nonmoving_gc RtsFlags.DebugFlags.nonmoving_gc
#define DEBUG_block_alloc  RtsFlags.DebugFlags.alloc
#define DEBUG_sanity       RtsFlags.DebugFlags.sanity
#define DEBUG_stable       RtsFlags.DebugFlags.stable
#define DEBUG_stm          RtsFlags.DebugFlags.stm
#define DEBUG_prof         RtsFlags.DebugFlags.prof
#define DEBUG_gran         RtsFlags.DebugFlags.gran
#define DEBUG_par          RtsFlags.DebugFlags.par
#define DEBUG_linker       RtsFlags.DebugFlags.linker
#define DEBUG_squeeze      RtsFlags.DebugFlags.squeeze
#define DEBUG_hpc          RtsFlags.DebugFlags.hpc
#define DEBUG_sparks       RtsFlags.DebugFlags.sparks
#define DEBUG_compact      RtsFlags.DebugFlags.compact
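
/*
 * Illustrative debugTrace() usage (not part of the original header; the
 * thread variable and format string are hypothetical):
 *
 *   debugTrace(DEBUG_sched, "running thread %d", (int)tso->id);
 */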

// events
extern int TRACE_sched;
extern int TRACE_gc;
extern int TRACE_spark_sampled;
extern int TRACE_spark_full;
/* extern int TRACE_user; */ // only used in Trace.c
extern int TRACE_cap;
extern int TRACE_nonmoving_gc;

// -----------------------------------------------------------------------------
// Posting events
//
// We use macros rather than inline functions deliberately. We want
// the not-taken case to be as efficient as possible, a simple
// test-and-jump, and with inline functions gcc seemed to move some of
// the instructions from the branch up before the test.
//
// -----------------------------------------------------------------------------

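// A sketch of the pattern (not part of the original header): a call such as
// traceGcEvent(cap, EVENT_GC_START) expands to roughly
//
//     if (RTS_UNLIKELY(TRACE_gc)) { traceGcEvent_(cap, EVENT_GC_START); }
//
// so a disabled trace class costs only a single test-and-jump.
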
#if defined(DEBUG)
void traceBegin (const char *str, ...);
void traceEnd (void);
#endif

#if defined(TRACING)

/*
 * Record a scheduler event
 */
#define traceSchedEvent(cap, tag, tso, other) \
    if (RTS_UNLIKELY(TRACE_sched)) { \
        traceSchedEvent_(cap, tag, tso, other, 0); \
    }

#define traceSchedEvent2(cap, tag, tso, info1, info2) \
    if (RTS_UNLIKELY(TRACE_sched)) { \
        traceSchedEvent_(cap, tag, tso, info1, info2); \
    }

void traceSchedEvent_ (Capability *cap, EventTypeNum tag,
                       StgTSO *tso, StgWord info1, StgWord info2);

/*
 * Record a GC event
 */
#define traceGcEvent(cap, tag) \
    if (RTS_UNLIKELY(TRACE_gc)) { \
        traceGcEvent_(cap, tag); \
    }

void traceGcEvent_ (Capability *cap, EventTypeNum tag);

/*
 * Record a GC event at the explicitly given timestamp
 */
#define traceGcEventAtT(cap, ts, tag) \
    if (RTS_UNLIKELY(TRACE_gc)) { \
        traceGcEventAtT_(cap, ts, tag); \
    }

void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag);

/*
 * Record a heap event
 */
#define traceHeapEvent(cap, tag, heap_capset, info1) \
    if (RTS_UNLIKELY(TRACE_gc)) { \
        traceHeapEvent_(cap, tag, heap_capset, info1); \
    }
void traceHeapEvent_ (Capability *cap,
                      EventTypeNum tag,
                      CapsetID heap_capset,
                      W_ info1);

void traceEventHeapInfo_ (CapsetID heap_capset,
                          uint32_t gens,
                          W_ maxHeapSize,
                          W_ allocAreaSize,
                          W_ mblockSize,
                          W_ blockSize);

void traceEventGcStats_ (Capability *cap,
                         CapsetID heap_capset,
                         uint32_t gen,
                         W_ copied,
                         W_ slop,
                         W_ fragmentation,
                         uint32_t par_n_threads,
                         W_ par_max_copied,
                         W_ par_tot_copied,
                         W_ par_balanced_copied);

/*
 * Record a spark event
 */
#define traceSparkEvent(cap, tag) \
    if (RTS_UNLIKELY(TRACE_spark_full)) { \
        traceSparkEvent_(cap, tag, 0); \
    }

#define traceSparkEvent2(cap, tag, other) \
    if (RTS_UNLIKELY(TRACE_spark_full)) { \
        traceSparkEvent_(cap, tag, other); \
    }

void traceSparkEvent_ (Capability *cap, EventTypeNum tag, StgWord info1);

// variadic macros are C99, and supported by gcc. However, the
// ##__VA_ARGS__ syntax is a gcc extension, which allows the variable
// argument list to be empty (see gcc docs for details).
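
// Illustrative only (not in the original header): with ##__VA_ARGS__ the
// trailing comma is dropped when no extra arguments are supplied, so both
//
//     trace(TRACE_sched, "scheduler idle");
//     trace(TRACE_gc, "collected generation %d", gen_no);
//
// expand correctly ('gen_no' is a hypothetical variable).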

/*
 * Emit a trace message on a particular Capability
 */
#define traceCap(class, cap, msg, ...) \
    if (RTS_UNLIKELY(class)) { \
        traceCap_(cap, msg, ##__VA_ARGS__); \
    }

void traceCap_(Capability *cap, char *msg, ...);

/*
 * Emit a trace message
 */
#define trace(class, msg, ...) \
    if (RTS_UNLIKELY(class)) { \
        trace_(msg, ##__VA_ARGS__); \
    }

void trace_(char *msg, ...);

/*
 * A message or event emitted by the program
 * Used by Debug.Trace.{traceEvent, traceEventIO}
 */
void traceUserMsg(Capability *cap, char *msg);

/*
 * A marker event emitted by the program
 * Used by Debug.Trace.{traceMarker, traceMarkerIO}
 */
void traceUserMarker(Capability *cap, char *msg);

/*
 * A binary message or event emitted by the program
 */
void traceUserBinaryMsg(Capability *cap, uint8_t *msg, size_t size);

/*
 * An event to record a Haskell thread's label/name
 * Used by GHC.Conc.labelThread
 */
void traceThreadLabel_(Capability *cap,
                       StgTSO *tso,
                       char *label);

/*
 * Emit a debug message (only when DEBUG is defined)
 */
#if defined(DEBUG)
#define debugTrace(class, msg, ...) \
    if (RTS_UNLIKELY(class)) { \
        trace_(msg, ##__VA_ARGS__); \
    }
#else
#define debugTrace(class, str, ...) /* nothing */
#endif

#if defined(DEBUG)
#define debugTraceCap(class, cap, msg, ...) \
    if (RTS_UNLIKELY(class)) { \
        traceCap_(cap, msg, ##__VA_ARGS__); \
    }
#else
#define debugTraceCap(class, cap, str, ...) /* nothing */
#endif

/*
 * Emit a message/event describing the state of a thread
 */
#define traceThreadStatus(class, tso) \
    if (RTS_UNLIKELY(class)) { \
        traceThreadStatus_(tso); \
    }

void traceThreadStatus_ (StgTSO *tso);

/*
 * Events for describing capabilities and capability sets in the eventlog
 */
#define traceCapEvent(cap, tag) \
    if (RTS_UNLIKELY(TRACE_cap)) { \
        traceCapEvent_(cap, tag); \
    }

void traceCapEvent_ (Capability *cap,
                     EventTypeNum tag);

#define traceCapsetEvent(tag, capset, info) \
    if (RTS_UNLIKELY(TRACE_cap)) { \
        traceCapsetEvent_(tag, capset, info); \
    }

void traceCapsetEvent_ (EventTypeNum tag,
                        CapsetID capset,
                        StgWord info);

void traceWallClockTime_(void);

void traceOSProcessInfo_ (void);

void traceSparkCounters_ (Capability *cap,
                          SparkCounters counters,
                          StgWord remaining);

void traceTaskCreate_ (Task *task,
                       Capability *cap);

void traceTaskMigrate_ (Task *task,
                        Capability *cap,
                        Capability *new_cap);

void traceTaskDelete_ (Task *task);

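/*
 * Heap profiling events: profile metadata and census samples emitted to
 * the eventlog.
 */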
void traceHeapProfBegin(StgWord8 profile_id);
void traceHeapProfSampleBegin(StgInt era);
void traceHeapProfSampleString(StgWord8 profile_id,
                               const char *label, StgWord residency);
#if defined(PROFILING)
void traceHeapProfCostCentre(StgWord32 ccID,
                             const char *label,
                             const char *module,
                             const char *srcloc,
                             StgBool is_caf);
void traceHeapProfSampleCostCentre(StgWord8 profile_id,
                                   CostCentreStack *stack, StgWord residency);
#endif /* PROFILING */

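/*
 * Events emitted by the concurrent phases of the nonmoving collector.
 */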
void traceConcMarkBegin(void);
void traceConcMarkEnd(StgWord32 marked_obj_count);
void traceConcSyncBegin(void);
void traceConcSyncEnd(void);
void traceConcSweepBegin(void);
void traceConcSweepEnd(void);
void traceConcUpdRemSetFlush(Capability *cap);
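
/*
 * Emit a nonmoving-allocator census sample for the given block size to the
 * eventlog (see sm/NonMovingCensus.h).
 */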
void traceNonmovingHeapCensus(uint32_t log_blk_size,
                              const struct NonmovingAllocCensus *census);

void flushTrace(void);

#else /* !TRACING */

#define traceSchedEvent(cap, tag, tso, other) /* nothing */
#define traceSchedEvent2(cap, tag, tso, other, info) /* nothing */
#define traceGcEvent(cap, tag) /* nothing */
#define traceGcEventAtT(cap, ts, tag) /* nothing */
#define traceEventGcStats_(cap, heap_capset, gen, \
                           copied, slop, fragmentation, \
                           par_n_threads, par_max_copied, \
                           par_tot_copied, par_balanced_copied) /* nothing */
#define traceHeapEvent(cap, tag, heap_capset, info1) /* nothing */
#define traceEventHeapInfo_(heap_capset, gens, \
                            maxHeapSize, allocAreaSize, \
                            mblockSize, blockSize) /* nothing */
#define traceSparkEvent(cap, tag) /* nothing */
#define traceSparkEvent2(cap, tag, other) /* nothing */
#define traceCap(class, cap, msg, ...) /* nothing */
#define trace(class, msg, ...) /* nothing */
#define debugTrace(class, str, ...) /* nothing */
#define debugTraceCap(class, cap, str, ...) /* nothing */
#define traceThreadStatus(class, tso) /* nothing */
#define traceThreadLabel_(cap, tso, label) /* nothing */
#define traceCapEvent(cap, tag) /* nothing */
#define traceCapsetEvent(tag, capset, info) /* nothing */
#define traceWallClockTime_() /* nothing */
#define traceOSProcessInfo_() /* nothing */
#define traceSparkCounters_(cap, counters, remaining) /* nothing */
#define traceTaskCreate_(taskID, cap) /* nothing */
#define traceTaskMigrate_(taskID, cap, new_cap) /* nothing */
#define traceTaskDelete_(taskID) /* nothing */
#define traceHeapProfBegin(profile_id) /* nothing */
#define traceHeapProfCostCentre(ccID, label, module, srcloc, is_caf) /* nothing */
#define traceHeapProfSampleBegin(era) /* nothing */
#define traceHeapProfSampleCostCentre(profile_id, stack, residency) /* nothing */
#define traceHeapProfSampleString(profile_id, label, residency) /* nothing */

#define traceConcMarkBegin() /* nothing */
#define traceConcMarkEnd(marked_obj_count) /* nothing */
#define traceConcSyncBegin() /* nothing */
#define traceConcSyncEnd() /* nothing */
#define traceConcSweepBegin() /* nothing */
#define traceConcSweepEnd() /* nothing */
#define traceConcUpdRemSetFlush(cap) /* nothing */
#define traceNonmovingHeapCensus(blk_size, census) /* nothing */

#define flushTrace() /* nothing */

#endif /* TRACING */

// If DTRACE is enabled, but neither DEBUG nor TRACING, we need a C land
// wrapper for the user-msg probe (as we can't expand that in PrimOps.cmm)
//
#if !defined(DEBUG) && !defined(TRACING) && defined(DTRACE)

void dtraceUserMsgWrapper(Capability *cap, char *msg);
void dtraceUserMarkerWrapper(Capability *cap, char *msg);

#endif /* !defined(DEBUG) && !defined(TRACING) && defined(DTRACE) */

// -----------------------------------------------------------------------------
// Aliases for static dtrace probes if dtrace is available
// -----------------------------------------------------------------------------

#if defined(DTRACE)

#define dtraceCreateThread(cap, tid) \
    HASKELLEVENT_CREATE_THREAD(cap, tid)
#define dtraceRunThread(cap, tid) \
    HASKELLEVENT_RUN_THREAD(cap, tid)
#define dtraceStopThread(cap, tid, status, info) \
    HASKELLEVENT_STOP_THREAD(cap, tid, status, info)
#define dtraceThreadRunnable(cap, tid) \
    HASKELLEVENT_THREAD_RUNNABLE(cap, tid)
#define dtraceMigrateThread(cap, tid, new_cap) \
    HASKELLEVENT_MIGRATE_THREAD(cap, tid, new_cap)
#define dtraceThreadWakeup(cap, tid, other_cap) \
    HASKELLEVENT_THREAD_WAKEUP(cap, tid, other_cap)
#define dtraceGcStart(cap) \
    HASKELLEVENT_GC_START(cap)
#define dtraceGcEnd(cap) \
    HASKELLEVENT_GC_END(cap)
#define dtraceRequestSeqGc(cap) \
    HASKELLEVENT_REQUEST_SEQ_GC(cap)
#define dtraceRequestParGc(cap) \
    HASKELLEVENT_REQUEST_PAR_GC(cap)
#define dtraceCreateSparkThread(cap, spark_tid) \
    HASKELLEVENT_CREATE_SPARK_THREAD(cap, spark_tid)
#define dtraceThreadLabel(cap, tso, label) \
    HASKELLEVENT_THREAD_LABEL(cap, tso, label)
#define dtraceCapCreate(cap) \
    HASKELLEVENT_CAP_CREATE(cap)
#define dtraceCapDelete(cap) \
    HASKELLEVENT_CAP_DELETE(cap)
#define dtraceCapEnable(cap) \
    HASKELLEVENT_CAP_ENABLE(cap)
#define dtraceCapDisable(cap) \
    HASKELLEVENT_CAP_DISABLE(cap)
#define dtraceUserMsg(cap, msg) \
    HASKELLEVENT_USER_MSG(cap, msg)
#define dtraceUserMarker(cap, msg) \
    HASKELLEVENT_USER_MARKER(cap, msg)
#define dtraceGcIdle(cap) \
    HASKELLEVENT_GC_IDLE(cap)
#define dtraceGcWork(cap) \
    HASKELLEVENT_GC_WORK(cap)
#define dtraceGcDone(cap) \
    HASKELLEVENT_GC_DONE(cap)
#define dtraceGcGlobalSync(cap) \
    HASKELLEVENT_GC_GLOBAL_SYNC(cap)
#define dtraceEventGcStats(heap_capset, gens, \
                           copies, slop, fragmentation, \
                           par_n_threads, \
                           par_max_copied, \
                           par_tot_copied, \
                           par_balanced_copied) \
    HASKELLEVENT_GC_STATS(heap_capset, gens, \
                          copies, slop, fragmentation, \
                          par_n_threads, \
                          par_max_copied, \
                          par_balanced_copied, \
                          par_tot_copied)
#define dtraceHeapInfo(heap_capset, gens, \
                       maxHeapSize, allocAreaSize, \
                       mblockSize, blockSize) \
    HASKELLEVENT_HEAP_INFO(heap_capset, gens, \
                           maxHeapSize, allocAreaSize, \
                           mblockSize, blockSize)
#define dtraceEventHeapAllocated(cap, heap_capset, \
                                 allocated) \
    HASKELLEVENT_HEAP_ALLOCATED(cap, heap_capset, \
                                allocated)
#define dtraceEventHeapSize(heap_capset, size) \
    HASKELLEVENT_HEAP_SIZE(heap_capset, size)
#define dtraceEventHeapLive(heap_capset, live) \
    HASKELLEVENT_HEAP_LIVE(heap_capset, live)
#define dtraceCapsetCreate(capset, capset_type) \
    HASKELLEVENT_CAPSET_CREATE(capset, capset_type)
#define dtraceCapsetDelete(capset) \
    HASKELLEVENT_CAPSET_DELETE(capset)
#define dtraceCapsetAssignCap(capset, capno) \
    HASKELLEVENT_CAPSET_ASSIGN_CAP(capset, capno)
#define dtraceCapsetRemoveCap(capset, capno) \
    HASKELLEVENT_CAPSET_REMOVE_CAP(capset, capno)
#define dtraceSparkCounters(cap, a, b, c, d, e, f, g) \
    HASKELLEVENT_SPARK_COUNTERS(cap, a, b, c, d, e, f, g)
#define dtraceSparkCreate(cap) \
    HASKELLEVENT_SPARK_CREATE(cap)
#define dtraceSparkDud(cap) \
    HASKELLEVENT_SPARK_DUD(cap)
#define dtraceSparkOverflow(cap) \
    HASKELLEVENT_SPARK_OVERFLOW(cap)
#define dtraceSparkRun(cap) \
    HASKELLEVENT_SPARK_RUN(cap)
#define dtraceSparkSteal(cap, victim_cap) \
    HASKELLEVENT_SPARK_STEAL(cap, victim_cap)
#define dtraceSparkFizzle(cap) \
    HASKELLEVENT_SPARK_FIZZLE(cap)
#define dtraceSparkGc(cap) \
    HASKELLEVENT_SPARK_GC(cap)
#define dtraceTaskCreate(taskID, cap, tid) \
    HASKELLEVENT_TASK_CREATE(taskID, cap, tid)
#define dtraceTaskMigrate(taskID, cap, new_cap) \
    HASKELLEVENT_TASK_MIGRATE(taskID, cap, new_cap)
#define dtraceTaskDelete(taskID) \
    HASKELLEVENT_TASK_DELETE(taskID)

#else /* !defined(DTRACE) */

#define dtraceCreateThread(cap, tid) /* nothing */
#define dtraceRunThread(cap, tid) /* nothing */
#define dtraceStopThread(cap, tid, status, info) /* nothing */
#define dtraceThreadRunnable(cap, tid) /* nothing */
#define dtraceMigrateThread(cap, tid, new_cap) /* nothing */
#define dtraceThreadWakeup(cap, tid, other_cap) /* nothing */
#define dtraceGcStart(cap) /* nothing */
#define dtraceGcEnd(cap) /* nothing */
#define dtraceRequestSeqGc(cap) /* nothing */
#define dtraceRequestParGc(cap) /* nothing */
#define dtraceCreateSparkThread(cap, spark_tid) /* nothing */
#define dtraceThreadLabel(cap, tso, label) /* nothing */
#define dtraceUserMsg(cap, msg) /* nothing */
#define dtraceUserMarker(cap, msg) /* nothing */
#define dtraceGcIdle(cap) /* nothing */
#define dtraceGcWork(cap) /* nothing */
#define dtraceGcDone(cap) /* nothing */
#define dtraceGcGlobalSync(cap) /* nothing */
#define dtraceEventGcStats(heap_capset, gens, \
                           copies, slop, fragmentation, \
                           par_n_threads, \
                           par_max_copied, \
                           par_tot_copied, \
                           par_balanced_copied) /* nothing */
#define dtraceHeapInfo(heap_capset, gens, \
                       maxHeapSize, allocAreaSize, \
                       mblockSize, blockSize) /* nothing */
#define dtraceEventHeapAllocated(cap, heap_capset, \
                                 allocated) /* nothing */
#define dtraceEventHeapSize(heap_capset, size) /* nothing */
#define dtraceEventHeapLive(heap_capset, live) /* nothing */
#define dtraceCapCreate(cap) /* nothing */
#define dtraceCapDelete(cap) /* nothing */
#define dtraceCapEnable(cap) /* nothing */
#define dtraceCapDisable(cap) /* nothing */
#define dtraceCapsetCreate(capset, capset_type) /* nothing */
#define dtraceCapsetDelete(capset) /* nothing */
#define dtraceCapsetAssignCap(capset, capno) /* nothing */
#define dtraceCapsetRemoveCap(capset, capno) /* nothing */
#define dtraceSparkCounters(cap, a, b, c, d, e, f, g) /* nothing */
#define dtraceSparkCreate(cap) /* nothing */
#define dtraceSparkDud(cap) /* nothing */
#define dtraceSparkOverflow(cap) /* nothing */
#define dtraceSparkRun(cap) /* nothing */
#define dtraceSparkSteal(cap, victim_cap) /* nothing */
#define dtraceSparkFizzle(cap) /* nothing */
#define dtraceSparkGc(cap) /* nothing */
#define dtraceTaskCreate(taskID, cap, tid) /* nothing */
#define dtraceTaskMigrate(taskID, cap, new_cap) /* nothing */
#define dtraceTaskDelete(taskID) /* nothing */

#endif

// -----------------------------------------------------------------------------
// Trace probes dispatching to various tracing frameworks
//
// To avoid accumulating multiple tracing calls at each trace point, we define
// inline probe functions that bundle the various invocations.
//
// Dtrace - dtrace probes are added unconditionally, since probe activation is
// handled by the dtrace component of the kernel and inactive probes are very
// cheap - usually a single no-op. Consequently, dtrace can be used with all
// flavours of the RTS. In addition, we still support logging events to a
// file, even in the presence of dtrace. This is useful, e.g., when tracing on
// a server but browsing the trace with ThreadScope on a local client.
//
// -----------------------------------------------------------------------------

INLINE_HEADER void traceEventCreateThread(Capability *cap STG_UNUSED,
                                          StgTSO *tso STG_UNUSED)
{
    traceSchedEvent(cap, EVENT_CREATE_THREAD, tso, tso->stackobj->stack_size);
    dtraceCreateThread((EventCapNo)cap->no, (EventThreadID)tso->id);
}

INLINE_HEADER void traceEventRunThread(Capability *cap STG_UNUSED,
                                       StgTSO *tso STG_UNUSED)
{
    traceSchedEvent(cap, EVENT_RUN_THREAD, tso, tso->what_next);
    dtraceRunThread((EventCapNo)cap->no, (EventThreadID)tso->id);
}

INLINE_HEADER void traceEventStopThread(Capability *cap STG_UNUSED,
                                        StgTSO *tso STG_UNUSED,
                                        StgThreadReturnCode status STG_UNUSED,
                                        StgWord32 info STG_UNUSED)
{
    traceSchedEvent2(cap, EVENT_STOP_THREAD, tso, status, info);
    dtraceStopThread((EventCapNo)cap->no, (EventThreadID)tso->id,
                     (EventThreadStatus)status, (EventThreadID)info);
}

INLINE_HEADER void traceEventMigrateThread(Capability *cap STG_UNUSED,
                                           StgTSO *tso STG_UNUSED,
                                           uint32_t new_cap STG_UNUSED)
{
    traceSchedEvent(cap, EVENT_MIGRATE_THREAD, tso, new_cap);
    dtraceMigrateThread((EventCapNo)cap->no, (EventThreadID)tso->id,
                        (EventCapNo)new_cap);
}

INLINE_HEADER void traceCapCreate(Capability *cap STG_UNUSED)
{
    traceCapEvent(cap, EVENT_CAP_CREATE);
    dtraceCapCreate((EventCapNo)cap->no);
}

INLINE_HEADER void traceCapDelete(Capability *cap STG_UNUSED)
{
    traceCapEvent(cap, EVENT_CAP_DELETE);
    dtraceCapDelete((EventCapNo)cap->no);
}

INLINE_HEADER void traceCapEnable(Capability *cap STG_UNUSED)
{
    traceCapEvent(cap, EVENT_CAP_ENABLE);
    dtraceCapEnable((EventCapNo)cap->no);
}

INLINE_HEADER void traceCapDisable(Capability *cap STG_UNUSED)
{
    traceCapEvent(cap, EVENT_CAP_DISABLE);
    dtraceCapDisable((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventThreadWakeup(Capability *cap STG_UNUSED,
                                          StgTSO *tso STG_UNUSED,
                                          uint32_t other_cap STG_UNUSED)
{
    traceSchedEvent(cap, EVENT_THREAD_WAKEUP, tso, other_cap);
    dtraceThreadWakeup((EventCapNo)cap->no, (EventThreadID)tso->id,
                       (EventCapNo)other_cap);
}

INLINE_HEADER void traceThreadLabel(Capability *cap STG_UNUSED,
                                    StgTSO *tso STG_UNUSED,
                                    char *label STG_UNUSED)
{
    if (RTS_UNLIKELY(TRACE_sched)) {
        traceThreadLabel_(cap, tso, label);
    }
    dtraceThreadLabel((EventCapNo)cap->no, (EventThreadID)tso->id, label);
}

INLINE_HEADER void traceEventGcStart(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_START);
    dtraceGcStart((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcStartAtT(Capability *cap STG_UNUSED,
                                        StgWord64 ts STG_UNUSED)
{
    traceGcEventAtT(cap, ts, EVENT_GC_START);
    dtraceGcStart((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcEnd(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_END);
    dtraceGcEnd((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcEndAtT(Capability *cap STG_UNUSED,
                                      StgWord64 ts STG_UNUSED)
{
    traceGcEventAtT(cap, ts, EVENT_GC_END);
    dtraceGcEnd((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventRequestSeqGc(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_REQUEST_SEQ_GC);
    dtraceRequestSeqGc((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventRequestParGc(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_REQUEST_PAR_GC);
    dtraceRequestParGc((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcIdle(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_IDLE);
    dtraceGcIdle((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcWork(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_WORK);
    dtraceGcWork((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcDone(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_DONE);
    dtraceGcDone((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcGlobalSync(Capability *cap STG_UNUSED)
{
    traceGcEvent(cap, EVENT_GC_GLOBAL_SYNC);
    dtraceGcGlobalSync((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventGcStats(Capability *cap STG_UNUSED,
                                     CapsetID heap_capset STG_UNUSED,
                                     uint32_t gen STG_UNUSED,
                                     W_ copied STG_UNUSED,
                                     W_ slop STG_UNUSED,
                                     W_ fragmentation STG_UNUSED,
                                     uint32_t par_n_threads STG_UNUSED,
                                     W_ par_max_copied STG_UNUSED,
                                     W_ par_tot_copied STG_UNUSED,
                                     W_ par_balanced_copied STG_UNUSED)
{
    if (RTS_UNLIKELY(TRACE_gc)) {
        traceEventGcStats_(cap, heap_capset, gen,
                           copied, slop, fragmentation,
                           par_n_threads, par_max_copied,
                           par_tot_copied, par_balanced_copied);
    }
    dtraceEventGcStats(heap_capset, gen,
                       copied, slop, fragmentation,
                       par_n_threads, par_max_copied,
                       par_tot_copied, par_balanced_copied);
}

INLINE_HEADER void traceEventHeapInfo(CapsetID heap_capset STG_UNUSED,
                                      uint32_t gens STG_UNUSED,
                                      W_ maxHeapSize STG_UNUSED,
                                      W_ allocAreaSize STG_UNUSED,
                                      W_ mblockSize STG_UNUSED,
                                      W_ blockSize STG_UNUSED)
{
    if (RTS_UNLIKELY(TRACE_gc)) {
        traceEventHeapInfo_(heap_capset, gens,
                            maxHeapSize, allocAreaSize,
                            mblockSize, blockSize);
    }
    dtraceHeapInfo(heap_capset, gens,
                   maxHeapSize, allocAreaSize,
                   mblockSize, blockSize);
}

INLINE_HEADER void traceEventHeapAllocated(Capability *cap STG_UNUSED,
                                           CapsetID heap_capset STG_UNUSED,
                                           W_ allocated STG_UNUSED)
{
    traceHeapEvent(cap, EVENT_HEAP_ALLOCATED, heap_capset, allocated);
    dtraceEventHeapAllocated((EventCapNo)cap->no, heap_capset, allocated);
}

INLINE_HEADER void traceEventHeapSize(Capability *cap STG_UNUSED,
                                      CapsetID heap_capset STG_UNUSED,
                                      W_ heap_size STG_UNUSED)
{
    traceHeapEvent(cap, EVENT_HEAP_SIZE, heap_capset, heap_size);
    dtraceEventHeapSize(heap_capset, heap_size);
}

INLINE_HEADER void traceEventHeapLive(Capability *cap STG_UNUSED,
                                      CapsetID heap_capset STG_UNUSED,
                                      W_ heap_live STG_UNUSED)
{
    traceHeapEvent(cap, EVENT_HEAP_LIVE, heap_capset, heap_live);
    dtraceEventHeapLive(heap_capset, heap_live);
}

INLINE_HEADER void traceCapsetCreate(CapsetID capset STG_UNUSED,
                                     CapsetType capset_type STG_UNUSED)
{
    traceCapsetEvent(EVENT_CAPSET_CREATE, capset, capset_type);
    dtraceCapsetCreate(capset, capset_type);
}

INLINE_HEADER void traceCapsetDelete(CapsetID capset STG_UNUSED)
{
    traceCapsetEvent(EVENT_CAPSET_DELETE, capset, 0);
    dtraceCapsetDelete(capset);
}

INLINE_HEADER void traceCapsetAssignCap(CapsetID capset STG_UNUSED,
                                        uint32_t capno STG_UNUSED)
{
    traceCapsetEvent(EVENT_CAPSET_ASSIGN_CAP, capset, capno);
    dtraceCapsetAssignCap(capset, capno);
}

INLINE_HEADER void traceCapsetRemoveCap(CapsetID capset STG_UNUSED,
                                        uint32_t capno STG_UNUSED)
{
    traceCapsetEvent(EVENT_CAPSET_REMOVE_CAP, capset, capno);
    dtraceCapsetRemoveCap(capset, capno);
}

INLINE_HEADER void traceWallClockTime(void)
{
    traceWallClockTime_();
    /* Note: no DTrace equivalent because it is available to DTrace directly */
}

INLINE_HEADER void traceOSProcessInfo(void)
{
    traceOSProcessInfo_();
    /* Note: no DTrace equivalent because all this OS process info
     * is available to DTrace directly */
}

INLINE_HEADER void traceEventCreateSparkThread(Capability *cap STG_UNUSED,
                                               StgThreadID spark_tid STG_UNUSED)
{
    traceSparkEvent2(cap, EVENT_CREATE_SPARK_THREAD, spark_tid);
    dtraceCreateSparkThread((EventCapNo)cap->no, (EventThreadID)spark_tid);
}

INLINE_HEADER void traceSparkCounters(Capability *cap STG_UNUSED)
{
#if defined(THREADED_RTS)
    if (RTS_UNLIKELY(TRACE_spark_sampled)) {
        traceSparkCounters_(cap, cap->spark_stats, sparkPoolSize(cap->sparks));
    }
    dtraceSparkCounters((EventCapNo)cap->no,
                        cap->spark_stats.created,
                        cap->spark_stats.dud,
                        cap->spark_stats.overflowed,
                        cap->spark_stats.converted,
                        cap->spark_stats.gcd,
                        cap->spark_stats.fizzled,
                        sparkPoolSize(cap->sparks));
#endif
}

INLINE_HEADER void traceEventSparkCreate(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_CREATE);
    dtraceSparkCreate((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkDud(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_DUD);
    dtraceSparkDud((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkOverflow(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_OVERFLOW);
    dtraceSparkOverflow((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkRun(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_RUN);
    dtraceSparkRun((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkSteal(Capability *cap STG_UNUSED,
                                        uint32_t victim_cap STG_UNUSED)
{
    traceSparkEvent2(cap, EVENT_SPARK_STEAL, victim_cap);
    dtraceSparkSteal((EventCapNo)cap->no, (EventCapNo)victim_cap);
}

INLINE_HEADER void traceEventSparkFizzle(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_FIZZLE);
    dtraceSparkFizzle((EventCapNo)cap->no);
}

INLINE_HEADER void traceEventSparkGC(Capability *cap STG_UNUSED)
{
    traceSparkEvent(cap, EVENT_SPARK_GC);
    dtraceSparkGc((EventCapNo)cap->no);
}

INLINE_HEADER void traceTaskCreate(Task *task STG_UNUSED,
                                   Capability *cap STG_UNUSED)
{
    ASSERT(task->cap == cap);
    // TODO: asserting task->cap == NULL would be much stronger
    // (the intention being that the task structure is just created and empty)
    // but would require larger changes to the traceTaskCreate calls.
    ASSERT(cap != NULL);
    // A new task gets associated with a cap. We also record
    // the kernel thread id of the task, which should never change.
    if (RTS_UNLIKELY(TRACE_sched)) {
        traceTaskCreate_(task, cap);
    }
    dtraceTaskCreate(serialisableTaskId(task),
                     (EventCapNo)cap->no,
                     kernelThreadId());
}

INLINE_HEADER void traceTaskMigrate(Task *task STG_UNUSED,
                                    Capability *cap STG_UNUSED,
                                    Capability *new_cap STG_UNUSED)
{
    ASSERT(task->cap == cap);
    ASSERT(cap != NULL);
    ASSERT(cap != new_cap);
    ASSERT(new_cap != NULL);
    // A task migrates from one cap to another.
    if (RTS_UNLIKELY(TRACE_sched)) {
        traceTaskMigrate_(task, cap, new_cap);
    }
    dtraceTaskMigrate(serialisableTaskId(task), (EventCapNo)cap->no,
                      (EventCapNo)new_cap->no);
}

INLINE_HEADER void traceTaskDelete(Task *task STG_UNUSED)
{
    ASSERT(task->cap != NULL);
    if (RTS_UNLIKELY(TRACE_sched)) {
        traceTaskDelete_(task);
    }
    dtraceTaskDelete(serialisableTaskId(task));
}

#include "EndPrivate.h"