/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 2001-2005
 *
 * Tasks
 *
 * For details on the high-level design, see
 *   http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Scheduler
 *
 * -------------------------------------------------------------------------*/

#ifndef TASK_H
#define TASK_H

#include "GetTime.h"

#include "BeginPrivate.h"

/*
  Definition of a Task
  --------------------

  A task is an OSThread that runs Haskell code.  Every OSThread that
  runs inside the RTS, whether as a worker created by the RTS or via
  an in-call from C to Haskell, has an associated Task.  The first
  time an OS thread calls into Haskell it is allocated a Task, which
  remains until the RTS is shut down.

  There is a one-to-one relationship between OSThreads and Tasks.
  The Task for an OSThread is kept in thread-local storage, and can
  be retrieved at any time using myTask().

  In the THREADED_RTS build, multiple Tasks may all be running
  Haskell code simultaneously.  A task relinquishes its Capability
  when it makes an external (C) call.

  Ownership of Task
  -----------------

  Task ownership is a little tricky.  The default situation is that
  the Task is an OS-thread-local structure that is owned by the OS
  thread named in task->id.  An OS thread not currently executing
  Haskell code might call newBoundTask() at any time, which assumes
  that it has access to the Task for the current OS thread.

  The all_next and all_prev fields of a Task are owned by
  all_tasks_mutex, which must also be taken if we want to create or
  free a Task.

  For an OS thread in Haskell, if (task->cap->running_task != task),
  then the Task is owned by the owner of the parent data structure on
  which it is sleeping; for example, if the task is sleeping on the
  spare_workers field of a Capability, then the owner of the
  Capability has access to the Task.

  When a task is migrated from sleeping on one Capability to another,
  its task->cap field must be modified.  When the task wakes up, it
  will read the new value of task->cap to find out which Capability
  it belongs to.  Hence some synchronisation is required on
  task->cap, and this is why we have task->lock.

  If the Task is not currently owned by task->id, then the thread is
  either

    (a) waiting on the condition task->cond.  The Task is either
        (1) a bound Task: its TSO will be on a queue somewhere
        (2) a worker Task: it will be on the spare_workers queue of
            task->cap.

    (b) making a foreign call.  The InCall will be on the
        suspended_ccalls list.

  We re-establish ownership in each case as follows:

    (a) the task is blocked in yieldCapability().  This call returns
        when we have ownership of the Task and a Capability.  The
        Capability we get might not be the same as the one we had
        when we called yieldCapability().

    (b) we must call resumeThread(task), which will safely establish
        ownership of the Task and a Capability.
*/
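
/*
  An illustrative sketch of case (b) above.  This is NOT the actual RTS
  code (the real implementation lives with suspendThread()/resumeThread()
  in the scheduler, Schedule.c); it only shows, under that assumption,
  which fields change hands when a Task gives up its Capability around
  a safe foreign call:

      // leaving Haskell: park the TSO where resumeThread() can find it,
      // put the InCall on the Capability's suspended_ccalls list, and
      // release the Capability
      task->incall->suspended_tso = tso;
      task->incall->suspended_cap = cap;

      // ... the foreign code now runs without owning any Capability ...

      // re-entering Haskell: this blocks until we own the Task and
      // some Capability again (not necessarily the same one)
      resumeThread(task);
*/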

// The InCall structure represents either a single in-call from C to
// Haskell, or a worker thread.
typedef struct InCall_ {
    StgTSO *   tso;             // the bound TSO (or NULL for a worker)

    StgTSO *   suspended_tso;   // the TSO is stashed here when we
                                // make a foreign call (NULL otherwise)

    Capability *suspended_cap;  // The capability that the
                                // suspended_tso is on, because
                                // we can't read this from the TSO
                                // without owning a Capability in the
                                // first place.

    SchedulerStatus  rstat;     // return status
    StgClosure **    ret;       // return value

    struct Task_ *task;

    // When a Haskell thread makes a foreign call that re-enters
    // Haskell, we end up with another Task associated with the
    // current thread.  We have to remember the whole stack of InCalls
    // associated with the current Task so that we can correctly
    // save & restore the InCall on entry to and exit from Haskell.
    struct InCall_ *prev_stack;

    // Links InCalls onto suspended_ccalls, spare_incalls
    struct InCall_ *prev;
    struct InCall_ *next;
} InCall;
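
/*
  An illustrative sketch of how prev_stack turns nested in-calls into a
  stack hanging off the current Task (task->incall is declared in the
  Task structure below).  The helper name pushInCall is hypothetical;
  the real push/pop logic lives in Task.c:

      static void pushInCall (Task *task, InCall *incall)
      {
          incall->task       = task;
          incall->prev_stack = task->incall;  // remember the outer InCall
          task->incall       = incall;        // new top-of-stack
      }

  On exit from Haskell the top InCall is popped again by restoring
  task->incall from task->incall->prev_stack.
*/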

typedef struct Task_ {
#if defined(THREADED_RTS)
    OSThreadId id;              // The OS Thread ID of this task

    // The NUMA node this Task belongs to.  If this is a worker thread, then the
    // OS thread will be bound to this node (see workerStart()).  If this is an
    // external thread calling into Haskell, it can be bound to a node using
    // rts_setInCallCapability().
    uint32_t node;

    Condition cond;             // used for sleeping & waking up this task
    Mutex lock;                 // lock for the condition variable

    // this flag tells the task whether it should wait on task->cond
    // or just continue immediately.  It's a workaround for the fact
    // that signalling a condition variable does nothing if the thread
    // is not yet waiting on it, whereas we want the wakeup to be
    // sticky (see the sketch after this struct).
    bool wakeup;
#endif

    // If the task owns a Capability, task->cap points to it.  (occasionally a
    // task may own multiple capabilities, in which case task->cap may point to
    // any of them.  We must be careful to set task->cap to the appropriate one
    // when using Capability APIs.)
    //
    // If the task is a worker, task->cap points to the Capability on which it
    // is queued.
    //
    // If the task is in an unsafe foreign call, then task->cap can be used to
    // retrieve the capability (see rts_unsafeGetMyCapability()).
    struct Capability_ *cap;

    // The current top-of-stack InCall
    struct InCall_ *incall;

    uint32_t n_spare_incalls;
    struct InCall_ *spare_incalls;

    bool    worker;             // == true if this is a worker Task
    bool    stopped;            // == false between newBoundTask and
                                // boundTaskExiting, or in a worker Task.

    // So that we can detect when a finalizer illegally calls back into Haskell
    bool running_finalizers;

    // if >= 0, this Capability will be used for in-calls
    int preferred_capability;

    // Links tasks on the returning_tasks queue of a Capability, and
    // on spare_workers.
    struct Task_ *next;

    // Links tasks on the all_tasks list; need ACQUIRE_LOCK(&all_tasks_mutex)
    struct Task_ *all_next;
    struct Task_ *all_prev;

} Task;
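
/*
  A sketch of the "sticky wakeup" pattern that the wakeup flag above is
  for.  This is illustrative only, not the code the RTS actually uses
  (that lives in Capability.c); it assumes the usual OSThreads
  lock/condition primitives:

      // waking a task that is asleep, or about to go to sleep:
      ACQUIRE_LOCK(&task->lock);
      task->wakeup = true;              // sticky: survives a missed signal
      signalCondition(&task->cond);
      RELEASE_LOCK(&task->lock);

      // going to sleep:
      ACQUIRE_LOCK(&task->lock);
      while (!task->wakeup) {
          waitCondition(&task->cond, &task->lock);
      }
      task->wakeup = false;             // consume the wakeup
      RELEASE_LOCK(&task->lock);
*/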

INLINE_HEADER bool
isBoundTask (Task *task)
{
    return (task->incall->tso != NULL);
}

// A Task is currently a worker if
//  (a) it was created as a worker (task->worker), and
//  (b) it has not left and re-entered Haskell, in which case
//      task->incall->prev_stack would be non-NULL.
//
INLINE_HEADER bool
isWorker (Task *task)
{
    return (task->worker && task->incall->prev_stack == NULL);
}

// Linked list of all tasks.
//
extern Task *all_tasks;

// The all_tasks list is protected by the all_tasks_mutex
#if defined(THREADED_RTS)
extern Mutex all_tasks_mutex;
#endif

// Start and stop the task manager.
// Requires: sched_mutex.
//
void initTaskManager (void);
uint32_t freeTaskManager (void);

// Create a new Task for a bound thread.  This Task must be released
// by calling boundTaskExiting.  The Task is cached in
// thread-local storage and will remain even after boundTaskExiting()
// has been called; to free the memory, see freeMyTask().
//
Task* newBoundTask (void);

// Return the current OS thread's Task, which is created if it doesn't already
// exist.  After you have finished using RTS APIs, you should call freeMyTask()
// to release this thread's Task.
Task* getTask (void);

// The current task is a bound task that is exiting.
//
void boundTaskExiting (Task *task);

// Free a Task if one was previously allocated by newBoundTask().
// This is not necessary unless the thread that called newBoundTask()
// will be exiting, or has finished calling Haskell functions.
// (See the usage sketch below.)
//
void freeMyTask(void);

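/*
  A sketch of how the functions above are intended to be used by an OS
  thread that makes bound in-calls to Haskell.  This is illustrative
  only; the real callers are e.g. rts_lock()/rts_unlock() and friends
  in RtsAPI.c:

      Task *task = newBoundTask();  // allocate/initialise this thread's Task
      // ... acquire a Capability and run Haskell code ...
      boundTaskExiting(task);       // the bound in-call is finished

      // The Task stays cached in thread-local storage, so later
      // in-calls from the same OS thread are cheap.  Once this thread
      // has finished calling Haskell for good:
      freeMyTask();
*/
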
// Notify the task manager that a task has stopped.  This is used
// mainly for stats-gathering purposes.
// Requires: sched_mutex.
//
#if defined(THREADED_RTS)
// In the non-threaded RTS, tasks never stop.
void workerTaskStop (Task *task);
#endif

// Discard all Tasks except the given one: they are freed and removed
// from the all_tasks list.  Used by forkProcess() in the child process.
//
void discardTasksExcept (Task *keep);

// Get the Task associated with the current OS thread (or NULL if none).
//
INLINE_HEADER Task *myTask (void);

#if defined(THREADED_RTS)

// Workers are attached to the supplied Capability.  This Capability
// should not currently have a running_task, because the new task
// will become the running_task for that Capability.
// Requires: sched_mutex.
//
void startWorkerTask (Capability *cap);

// Interrupts a worker task that is performing an FFI call.  The thread
// should not be destroyed.
//
void interruptWorkerTask (Task *task);

#endif /* THREADED_RTS */

// For stats
extern uint32_t taskCount;
extern uint32_t workerCount;
extern uint32_t peakWorkerCount;

// -----------------------------------------------------------------------------
// INLINE functions... private from here on down:

// A thread-local-storage key that we can use to get access to the
// current thread's Task structure.
#if defined(THREADED_RTS)
#if ((defined(linux_HOST_OS) && \
     (defined(i386_HOST_ARCH) || defined(x86_64_HOST_ARCH))) || \
    (defined(mingw32_HOST_OS) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 4)) && \
    (!defined(llvm_CC_FLAVOR))
#define MYTASK_USE_TLV
extern __thread Task *my_task;
#else
extern ThreadLocalKey currentTaskKey;
#endif
#else
extern Task *my_task;
#endif

//
// myTask() uses thread-local storage to find the Task associated with
// the current OS thread.  If the current OS thread has re-entered the
// RTS, the nested in-calls are recorded on the task->incall->prev_stack
// chain (see the InCall structure above).
//
INLINE_HEADER Task *
myTask (void)
{
#if defined(THREADED_RTS) && !defined(MYTASK_USE_TLV)
    return getThreadLocalVar(&currentTaskKey);
#else
    return my_task;
#endif
}

INLINE_HEADER void
setMyTask (Task *task)
{
#if defined(THREADED_RTS) && !defined(MYTASK_USE_TLV)
    setThreadLocalVar(&currentTaskKey,task);
#else
    my_task = task;
#endif
}

// Tasks are identified by their OS thread ID, which can be serialised
// to StgWord64, as defined below.
typedef StgWord64 TaskId;

// Get a unique serialisable representation for a task id.
//
// It's only unique within the process.  For example, if task ids are
// emitted in a log file, this is enough to work out which log entries
// are related.
//
// This is needed because OSThreadId is an opaque type
// and in practice on some platforms it is a pointer type.
//
#if defined(THREADED_RTS)
INLINE_HEADER TaskId serialiseTaskId (OSThreadId taskID) {
#if defined(freebsd_HOST_OS) || defined(darwin_HOST_OS)
    // Here OSThreadId is a pthread_t and pthread_t is a pointer, but within
    // the process we can still use that pointer value as a unique id.
    return (TaskId) (size_t) taskID;
#else
    // On Windows, Linux and others it's an integral type to start with.
    return (TaskId) taskID;
#endif
}
#endif

//
// Get a serialisable Id for the Task's OS thread.
// Needed mainly for logging, since OSThreadId is an opaque type.
INLINE_HEADER TaskId
serialisableTaskId (Task *task)
{
#if defined(THREADED_RTS)
    return serialiseTaskId(task->id);
#else
    return (TaskId) (size_t) task;
#endif
}

#include "EndPrivate.h"

#endif /* TASK_H */