Revert "rts/sm/Storage.c: tweak __clear_cache proto for clang"
[ghc.git] / rts / Task.h
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 2001-2005
4 *
5 * Tasks
6 *
7 * For details on the high-level design, see
 * https://gitlab.haskell.org/ghc/ghc/-/wikis/commentary/rts/scheduler
9 *
10 * -------------------------------------------------------------------------*/
11
12 #pragma once
13
14 #include "GetTime.h"
15
16 #include "BeginPrivate.h"
17
18 /*
19 Definition of a Task
20 --------------------
21
22 A task is an OSThread that runs Haskell code. Every OSThread that
23 runs inside the RTS, whether as a worker created by the RTS or via
24 an in-call from C to Haskell, has an associated Task. The first
25 time an OS thread calls into Haskell it is allocated a Task, which
26 remains until the RTS is shut down.
27
28 There is a one-to-one relationship between OSThreads and Tasks.
29 The Task for an OSThread is kept in thread-local storage, and can
30 be retrieved at any time using myTask().
31
32 In the THREADED_RTS build, multiple Tasks may all be running
33 Haskell code simultaneously. A task relinquishes its Capability
34 when it is asked to evaluate an external (C) call.
35
36 Ownership of Task
37 -----------------
38
39 Task ownership is a little tricky. The default situation is that
40 the Task is an OS-thread-local structure that is owned by the OS
41 thread named in task->id. An OS thread not currently executing
42 Haskell code might call newBoundTask() at any time, which assumes
43 that it has access to the Task for the current OS thread.
44
45 The all_next and all_prev fields of a Task are owned by
46 all_tasks_mutex, which must also be taken if we want to create or
47 free a Task.
48
49 For an OS thread in Haskell, if (task->cap->running_task != task),
50 then the Task is owned by the owner of the parent data structure on
51 which it is sleeping; for example, if the task is sleeping on
52 spare_workers field of a Capability, then the owner of the
53 Capability has access to the Task.
54
55 When a task is migrated from sleeping on one Capability to another,
56 its task->cap field must be modified. When the task wakes up, it
57 will read the new value of task->cap to find out which Capability
58 it belongs to. Hence some synchronisation is required on
59 task->cap, and this is why we have task->lock.
60
61 If the Task is not currently owned by task->id, then the thread is
62 either
63
64 (a) waiting on the condition task->cond. The Task is either
65 (1) a bound Task, the TSO will be on a queue somewhere
66 (2) a worker task, on the spare_workers queue of task->cap.
67
68 (b) making a foreign call. The InCall will be on the
69 suspended_ccalls list.
70
71 We re-establish ownership in each case by respectively
72
73 (a) the task is currently blocked in yieldCapability().
74 This call will return when we have ownership of the Task and
75 a Capability. The Capability we get might not be the same
76 as the one we had when we called yieldCapability().
77
78 (b) we must call resumeThread(task), which will safely establish
79 ownership of the Task and a Capability.
80 */
81
// The InCall structure represents either a single in-call from C to
// Haskell, or a worker thread.
typedef struct InCall_ {
    StgTSO * tso;               // the bound TSO (or NULL for a worker)

    StgTSO * suspended_tso;     // the TSO is stashed here when we
                                // make a foreign call (NULL otherwise);

    Capability *suspended_cap;  // The capability that the
                                // suspended_tso is on, because
                                // we can't read this from the TSO
                                // without owning a Capability in the
                                // first place.

    SchedulerStatus rstat;      // return status
    StgClosure ** ret;          // return value

    struct Task_ *task;         // the Task this InCall is associated with

    // When a Haskell thread makes a foreign call that re-enters
    // Haskell, we end up with another Task associated with the
    // current thread.  We have to remember the whole stack of InCalls
    // associated with the current Task so that we can correctly
    // save & restore the InCall on entry to and exit from Haskell.
    struct InCall_ *prev_stack;

    // Links InCalls onto suspended_ccalls, spare_incalls
    struct InCall_ *prev;
    struct InCall_ *next;
} InCall;
112
typedef struct Task_ {
#if defined(THREADED_RTS)
    OSThreadId id;              // The OS Thread ID of this task

    // The NUMA node this Task belongs to. If this is a worker thread, then the
    // OS thread will be bound to this node (see workerStart()). If this is an
    // external thread calling into Haskell, it can be bound to a node using
    // rts_setInCallCapability().
    uint32_t node;

    Condition cond;             // used for sleeping & waking up this task
    Mutex lock;                 // lock for the condition variable

    // this flag tells the task whether it should wait on task->cond
    // or just continue immediately.  It's a workaround for the fact
    // that signalling a condition variable doesn't do anything if the
    // thread is already running, but we want it to be sticky.
    bool wakeup;
#endif

    // If the task owns a Capability, task->cap points to it.  (occasionally a
    // task may own multiple capabilities, in which case task->cap may point to
    // any of them.  We must be careful to set task->cap to the appropriate one
    // when using Capability APIs.)
    //
    // If the task is a worker, task->cap points to the Capability on which it
    // is queued.
    //
    // If the task is in an unsafe foreign call, then task->cap can be used to
    // retrieve the capability (see rts_unsafeGetMyCapability()).
    struct Capability_ *cap;

    // The current top-of-stack InCall
    struct InCall_ *incall;

    // List of unused InCall structures (linked via InCall.next/prev),
    // presumably kept to avoid reallocation on re-entry — see InCall.
    uint32_t n_spare_incalls;
    struct InCall_ *spare_incalls;

    bool    worker;          // == true if this is a worker Task
    bool    stopped;         // == true between newBoundTask and
                             // boundTaskExiting, or in a worker Task.

    // So that we can detect when a finalizer illegally calls back into Haskell
    bool running_finalizers;

    // if >= 0, this Capability will be used for in-calls
    int preferred_capability;

    // Links tasks on the returning_tasks queue of a Capability, and
    // on spare_workers.
    struct Task_ *next;

    // Links tasks on the all_tasks list; need ACQUIRE_LOCK(&all_tasks_mutex)
    struct Task_ *all_next;
    struct Task_ *all_prev;

} Task;
170
171 INLINE_HEADER bool
172 isBoundTask (Task *task)
173 {
174 return (task->incall->tso != NULL);
175 }
176
177 // A Task is currently a worker if
178 // (a) it was created as a worker (task->worker), and
179 // (b) it has not left and re-entered Haskell, in which case
180 // task->incall->prev_stack would be non-NULL.
181 //
182 INLINE_HEADER bool
183 isWorker (Task *task)
184 {
185 return (task->worker && task->incall->prev_stack == NULL);
186 }
187
// Linked list of all tasks.
//
extern Task *all_tasks;

// The all_tasks list is protected by the all_tasks_mutex
#if defined(THREADED_RTS)
extern Mutex all_tasks_mutex;
#endif

// Start and stop the task manager.
// Requires: sched_mutex.
//
void initTaskManager (void);
uint32_t freeTaskManager (void);

// Create a new Task for a bound thread.  This Task must be released
// by calling boundTaskExiting.  The Task is cached in
// thread-local storage and will remain even after boundTaskExiting()
// has been called; to free the memory, see freeMyTask().
//
Task* newBoundTask (void);

// Return the current OS thread's Task, which is created if it doesn't already
// exist.  After you have finished using RTS APIs, you should call freeMyTask()
// to release this thread's Task.
Task* getTask (void);

// The current task is a bound task that is exiting.
//
void boundTaskExiting (Task *task);

// Free a Task if one was previously allocated by newBoundTask().
// This is not necessary unless the thread that called newBoundTask()
// will be exiting, or if this thread has finished calling Haskell
// functions.
//
void freeMyTask(void);

// Notify the task manager that a task has stopped.  This is used
// mainly for stats-gathering purposes.
// Requires: sched_mutex.
//
#if defined(THREADED_RTS)
// In the non-threaded RTS, tasks never stop.
void workerTaskStop (Task *task);
#endif

// Put the task back on the free list, mark it stopped.  Used by
// forkProcess().
// NOTE(review): the name suggests every Task EXCEPT 'keep' is
// discarded, not just one — confirm against the definition in Task.c.
//
void discardTasksExcept (Task *keep);

// Get the Task associated with the current OS thread (or NULL if none).
//
INLINE_HEADER Task *myTask (void);

#if defined(THREADED_RTS)

// Workers are attached to the supplied Capability.  This Capability
// should not currently have a running_task, because the new task
// will become the running_task for that Capability.
// Requires: sched_mutex.
//
void startWorkerTask (Capability *cap);

// Interrupts a worker task that is performing an FFI call.  The thread
// should not be destroyed.
//
void interruptWorkerTask (Task *task);

#endif /* THREADED_RTS */

// For stats
extern uint32_t taskCount;
extern uint32_t workerCount;
extern uint32_t peakWorkerCount;
264
265 // -----------------------------------------------------------------------------
266 // INLINE functions... private from here on down:
267
268 // A thread-local-storage key that we can use to get access to the
269 // current thread's Task structure.
270 #if defined(THREADED_RTS)
271 #if ((defined(linux_HOST_OS) && \
272 (defined(i386_HOST_ARCH) || defined(x86_64_HOST_ARCH))) || \
273 (defined(mingw32_HOST_OS) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 4)) && \
274 (!defined(llvm_CC_FLAVOR))
275 #define MYTASK_USE_TLV
276 extern __thread Task *my_task;
277 #else
278 extern ThreadLocalKey currentTaskKey;
279 #endif
280 #else
281 extern Task *my_task;
282 #endif
283
//
// myTask() uses thread-local storage to find the Task associated with
// the current OS thread. If the current OS thread has multiple
// Tasks, because it has re-entered the RTS, then the task->prev_stack
// field is used to store the previous Task.
//
INLINE_HEADER Task *
myTask (void)
{
#if defined(THREADED_RTS) && !defined(MYTASK_USE_TLV)
    // No native __thread support: fetch via the explicit TLS key.
    return getThreadLocalVar(&currentTaskKey);
#else
    // Either single-threaded (plain global) or native __thread variable.
    return my_task;
#endif
}
299
// Store 'task' as the current OS thread's Task (counterpart of myTask()).
INLINE_HEADER void
setMyTask (Task *task)
{
#if defined(THREADED_RTS) && !defined(MYTASK_USE_TLV)
    // No native __thread support: store via the explicit TLS key.
    setThreadLocalVar(&currentTaskKey,task);
#else
    // Either single-threaded (plain global) or native __thread variable.
    my_task = task;
#endif
}
309
// Tasks are identified by their OS thread ID, which can be serialised
// to StgWord64, as defined below.
typedef StgWord64 TaskId;

// Get a unique serialisable representation for a task id.
//
// It's only unique within the process. For example if they are emitted in a
// log file then it is suitable to work out which log entries are related.
//
// This is needed because OSThreadId is an opaque type
// and in practice on some platforms it is a pointer type.
//
#if defined(THREADED_RTS)
INLINE_HEADER TaskId serialiseTaskId (OSThreadId taskID) {
#if defined(freebsd_HOST_OS) || defined(darwin_HOST_OS)
    // Here OSThreadId is a pthread_t and pthread_t is a pointer, but within
    // the process we can still use that pointer value as a unique id.
    return (TaskId) (size_t) taskID;
#else
    // On Windows, Linux and others it's an integral type to start with.
    return (TaskId) taskID;
#endif
}
#endif
334
//
// Get a serialisable Id for the Task's OS thread.
// Needed mainly for logging since the OSThreadId is an opaque type.
INLINE_HEADER TaskId
serialisableTaskId (Task *task)
{
#if defined(THREADED_RTS)
    return serialiseTaskId(task->id);
#else
    // Non-threaded RTS: there is only one OS thread, so the Task's own
    // address is a process-unique id.
    return (TaskId) (size_t) task;
#endif
}
347
348 #include "EndPrivate.h"