NonMoving: Add summarizing Note
[ghc.git] / includes / rts / storage / TSO.h
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2009
4 *
5 * The definitions for Thread State Objects.
6 *
7 * ---------------------------------------------------------------------------*/
8
9 #pragma once
10
/*
 * PROFILING info in a TSO.
 * Only present in the TSO when the RTS is built with PROFILING
 * (see the #if defined(PROFILING) field in struct StgTSO_ below).
 */
typedef struct {
  CostCentreStack *cccs;       /* thread's current cost-centre stack */
} StgTSOProfInfo;
17
18 /*
19 * There is no TICKY info in a TSO at this time.
20 */
21
/*
 * Thread IDs are 32 bits.
 */
typedef StgWord32 StgThreadID;
26
27 #define tsoLocked(tso) ((tso)->flags & TSO_LOCKED)
28
/*
 * Type returned after running a thread.  Values of this type
 * include HeapOverflow, StackOverflow etc.  See Constants.h for the
 * full list.
 */
typedef unsigned int StgThreadReturnCode;
35
#if defined(mingw32_HOST_OS)
/* Results from an async I/O request + its request ID (Windows only;
 * referenced from the async_result member of StgTSOBlockInfo below). */
typedef struct {
  unsigned int reqID;          /* ID of the completed request */
  int          len;            /* number of bytes transferred */
  int          errCode;        /* error code, if the request failed */
} StgAsyncIOResult;
#endif
44
/* Reason for thread being blocked.  See comment above struct StgTso_.
 * Which member is valid depends on tso->why_blocked; the table in the
 * "Invariants" comment at the bottom of this file maps why_blocked
 * values to block_info contents. */
typedef union {
  StgClosure *closure;
  StgTSO *prev; // a back-link when the TSO is on the run queue (NotBlocked)
  struct MessageBlackHole_ *bh;
  struct MessageThrowTo_ *throwto;
  struct MessageWakeup_ *wakeup;
  StgInt fd;    /* StgInt instead of int, so that it's the same size as the ptrs */
#if defined(mingw32_HOST_OS)
  StgAsyncIOResult *async_result;
#endif
#if !defined(THREADED_RTS)
  StgWord target;
    // Only for the non-threaded RTS: the target time for a thread
    // blocked in threadDelay, in units of 1ms.  This is a
    // compromise: we don't want to take up much space in the TSO.  If
    // you want better resolution for threadDelay, use -threaded.
#endif
} StgTSOBlockInfo;
64
65
66 /*
67 * TSOs live on the heap, and therefore look just like heap objects.
68 * Large TSOs will live in their own "block group" allocated by the
69 * storage manager, and won't be copied during garbage collection.
70 */
71
72 /*
73 * Threads may be blocked for several reasons. A blocked thread will
74 * have the reason in the why_blocked field of the TSO, and some
75 * further info (such as the closure the thread is blocked on, or the
76 * file descriptor if the thread is waiting on I/O) in the block_info
77 * field.
78 */
79
typedef struct StgTSO_ {
    StgHeader               header;

    /* The link field, for linking threads together in lists (e.g. the
       run queue on a Capability).
    */
    struct StgTSO_*         _link;
    /*
       Currently used for linking TSOs on:
       * cap->run_queue_{hd,tl}
       * (non-THREADED_RTS); the blocked_queue
       * and pointing to the next chunk for a ThreadOldStack

       NOTE!!! do not modify _link directly, it is subject to
       a write barrier for generational GC.  Instead use the
       setTSOLink() function.  Exceptions to this rule are:

       * setting the link field to END_TSO_QUEUE
       * setting the link field of the currently running TSO, as it
         will already be dirty.
    */

    struct StgTSO_*         global_link;    // Links threads on the
                                            // generation->threads lists

    /*
     * The thread's stack (a separate heap object; see StgStack below).
     */
    struct StgStack_       *stackobj;

    /*
     * The tso->dirty flag indicates that this TSO's stack should be
     * scanned during garbage collection.  It also indicates that this
     * TSO is on the mutable list.
     *
     * NB. The dirty flag gets a word to itself, so that it can be set
     * safely by multiple threads simultaneously (the flags field is
     * not safe for this purpose; see #3429).  It is harmless for the
     * TSO to be on the mutable list multiple times.
     *
     * tso->dirty is set by dirty_TSO(), and unset by the garbage
     * collector (only).
     */

    StgWord16               what_next;      // Values defined in Constants.h
    StgWord16               why_blocked;    // Values defined in Constants.h
    StgWord32               flags;          // Values defined in Constants.h
    StgTSOBlockInfo         block_info;     // Interpreted per why_blocked; see
                                            // the union's comment above.
    StgThreadID             id;
    StgWord32               saved_errno;    // errno saved across a context switch
    StgWord32               dirty;          /* non-zero => dirty; see big comment above */
    struct InCall_*         bound;          // non-NULL for a bound (in-call) thread
    struct Capability_*     cap;            // the Capability this TSO belongs to

    struct StgTRecHeader_ * trec;           /* STM transaction record */

    /*
     * A list of threads blocked on this TSO waiting to throw exceptions.
     */
    struct MessageThrowTo_ * blocked_exceptions;

    /*
     * A list of StgBlockingQueue objects, representing threads
     * blocked on thunks that are under evaluation by this thread.
     */
    struct StgBlockingQueue_ *bq;

    /*
     * The allocation limit for this thread, which is updated as the
     * thread allocates.  If the value drops below zero, and
     * TSO_ALLOC_LIMIT is set in flags, we raise an exception in the
     * thread, and give the thread a little more space to handle the
     * exception before we raise the exception again.
     *
     * This is an integer, because we might update it in a place where
     * it isn't convenient to raise the exception, so we want it to
     * stay negative until we get around to checking it.
     *
     * Use only PK_Int64/ASSIGN_Int64 macros to get/set the value of alloc_limit
     * in C code otherwise you will cause alignment issues on SPARC
     */
    StgInt64  alloc_limit;     /* in bytes */

    /*
     * sum of the sizes of all stack chunks (in words), used to decide
     * whether to throw the StackOverflow exception when the stack
     * overflows, or whether to just chain on another stack chunk.
     *
     * Note that this overestimates the real stack size, because each
     * chunk will have a gap at the end, of +RTS -kb<size> words.
     * This means stack overflows are not entirely accurate, because
     * the more gaps there are, the sooner the stack will run into the
     * hard +RTS -K<size> limit.
     */
    StgWord32  tot_stack_size;

#if defined(TICKY_TICKY)
    /* TICKY-specific stuff would go here. */
#endif
#if defined(PROFILING)
    StgTSOProfInfo prof;
#endif
#if defined(mingw32_HOST_OS)
    StgWord32 saved_winerror;   // Windows analogue of saved_errno
#endif

} *StgTSOPtr; // StgTSO defined in rts/Types.h
187
188 /* Note [StgStack dirtiness flags and concurrent marking]
189 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
190 *
191 * Without concurrent collection by the nonmoving collector the stack dirtiness story
192 * is quite simple: The stack is either STACK_DIRTY (meaning it has been added to mut_list)
193 * or not.
194 *
195 * However, things are considerably more complicated with concurrent collection
196 * (namely, when nonmoving_write_barrier_enabled is set): In addition to adding
197 * the stack to mut_list and flagging it as STACK_DIRTY, we also must ensure
198 * that stacks are marked in accordance with the nonmoving collector's snapshot
199 * invariant. This is: every stack alive at the time the snapshot is taken must
200 * be marked at some point after the moment the snapshot is taken and before it
201 * is mutated or the commencement of the sweep phase.
202 *
203 * This marking may be done by the concurrent mark phase (in the case of a
204 * thread that never runs during the concurrent mark) or by the mutator when
205 * dirtying the stack. However, it is unsafe for the concurrent collector to
206 * traverse the stack while it is under mutation. Consequently, the following
207 * handshake is obeyed by the mutator's write barrier and the concurrent mark to
208 * ensure this doesn't happen:
209 *
210 * 1. The entity seeking to mark first checks that the stack lives in the nonmoving
211 * generation; if not then the stack was not alive at the time the snapshot
212 * was taken and therefore we need not mark it.
213 *
214 * 2. The entity seeking to mark checks the stack's mark bit. If it is set then
215 * no mark is necessary.
216 *
217 * 3. The entity seeking to mark tries to lock the stack for marking by
218 * atomically setting its `marking` field to the current non-moving mark
219 * epoch:
220 *
221 * a. If the mutator finds the concurrent collector has already locked the
222 * stack then it waits until it is finished (indicated by the mark bit
223 * being set) before proceeding with execution.
224 *
225 * b. If the concurrent collector finds that the mutator has locked the stack
226 * then it moves on, leaving the mutator to mark it. There is no need to wait;
227 * the mark is guaranteed to finish before sweep due to the post-mark
228 * synchronization with mutators.
229 *
230 * c. Whoever succeeds in locking the stack is responsible for marking it and
231 * setting the stack's mark bit (either the BF_MARKED bit for large objects
232 * or otherwise its bit in its segment's mark bitmap).
233 *
234 * To ensure that mutation does not proceed until the stack is fully marked the
235 * mark phase must not set the mark bit until it has finished tracing.
236 *
237 */
238
/* StgStack dirtiness flags; see Note [StgStack dirtiness flags and
 * concurrent marking] above. */
#define STACK_DIRTY 1
// used by sanity checker to verify that all dirty stacks are on the mutable list
#define STACK_SANE 64
242
typedef struct StgStack_ {
    StgHeader  header;
    StgWord32  stack_size;     // stack size in *words*
    StgWord    dirty;          // non-zero => dirty (see Note [StgStack dirtiness
                               // flags and concurrent marking] above)
    StgWord    marking;        // non-zero => someone is currently marking the stack
    StgPtr     sp;             // current stack pointer
    StgWord    stack[];        // the stack contents (flexible array member)
} StgStack;
251
252 // Calculate SpLim from a TSO (reads tso->stackobj, but no fields from
253 // the stackobj itself).
254 INLINE_HEADER StgPtr tso_SpLim (StgTSO* tso)
255 {
256 return tso->stackobj->stack + RESERVED_STACK_WORDS;
257 }
258
/* -----------------------------------------------------------------------------
   functions
   -------------------------------------------------------------------------- */

/* Mark a TSO dirty (sets tso->dirty; see the comment on the dirty field
 * in struct StgTSO_ above). */
void dirty_TSO  (Capability *cap, StgTSO *tso);
/* Set tso->_link with the required GC write barrier; use this instead of
 * writing _link directly (see the NOTE on _link above). */
void setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target);
/* Set tso->block_info.prev — presumably with the same write-barrier
 * discipline as setTSOLink; confirm in the implementation. */
void setTSOPrev (Capability *cap, StgTSO *tso, StgTSO *target);

/* Mark a stack dirty; interacts with the nonmoving collector, see
 * Note [StgStack dirtiness flags and concurrent marking] above. */
void dirty_STACK (Capability *cap, StgStack *stack);
268
269 /* -----------------------------------------------------------------------------
270 Invariants:
271
272 An active thread has the following properties:
273
274 tso->stack < tso->sp < tso->stack+tso->stack_size
275 tso->stack_size <= tso->max_stack_size
276
277 RESERVED_STACK_WORDS is large enough for any heap-check or
278 stack-check failure.
279
280 The size of the TSO struct plus the stack is either
281 (a) smaller than a block, or
282 (b) a multiple of BLOCK_SIZE
283
284 tso->why_blocked tso->block_info location
285 ----------------------------------------------------------------------
286 NotBlocked END_TSO_QUEUE runnable_queue, or running
287
288 BlockedOnBlackHole MessageBlackHole * TSO->bq
289
290 BlockedOnMVar the MVAR the MVAR's queue
291
292 BlockedOnSTM END_TSO_QUEUE STM wait queue(s)
293 BlockedOnSTM STM_AWOKEN run queue
294
   BlockedOnMsgThrowTo MessageThrowTo *      TSO->blocked_exceptions
296
297 BlockedOnRead NULL blocked_queue
298 BlockedOnWrite NULL blocked_queue
299 BlockedOnDelay NULL blocked_queue
300
301 tso->link == END_TSO_QUEUE, if the thread is currently running.
302
303 A zombie thread has the following properties:
304
305 tso->what_next == ThreadComplete or ThreadKilled
306 tso->link == (could be on some queue somewhere)
307 tso->sp == tso->stack + tso->stack_size - 1 (i.e. top stack word)
308 tso->sp[0] == return value of thread, if what_next == ThreadComplete,
309 exception , if what_next == ThreadKilled
310
311 (tso->sp is left pointing at the top word on the stack so that
312 the return value or exception will be retained by a GC).
313
314 ---------------------------------------------------------------------------- */
315
/* The NIL ptr for a TSO queue (e.g. runnable queue): a statically
 * allocated closure, so end-of-queue is detectable by pointer equality. */
#define END_TSO_QUEUE  ((StgTSO *)(void*)&stg_END_TSO_QUEUE_closure)