22c1d2a5c786eab64d4c85e34e8d79b60a4d69f6
[ghc.git] / rts / Capability.h
1 /* ---------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 2001-2006
4 *
5 * Capabilities
6 *
7 * For details on the high-level design, see
8 * http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Scheduler
9 *
10 * A Capability holds all the state an OS thread/task needs to run
11 * Haskell code: its STG registers, a pointer to its TSO, a nursery
 * etc. During STG execution, a pointer to the Capability is kept in
13 * a register (BaseReg).
14 *
15 * Only in a THREADED_RTS build will there be multiple capabilities,
16 * in the non-threaded RTS there is one global capability, called
17 * MainCapability.
18 *
19 * --------------------------------------------------------------------------*/
20
21 #ifndef CAPABILITY_H
22 #define CAPABILITY_H
23
24 #include "sm/GC.h" // for evac_fn
25 #include "Task.h"
26 #include "Sparks.h"
27
28 #include "BeginPrivate.h"
29
struct Capability_ {
    // State required by the STG virtual machine when running Haskell
    // code.  During STG execution, the BaseReg register always points
    // to the StgRegTable of the current Capability (&cap->r).
    // NB. 'r' must stay at a fixed offset: regTableToCapability()
    // (below) recovers the Capability from &cap->r by offset arithmetic.
    StgFunTable f;
    StgRegTable r;

    uint32_t no;  // capability number.

    // The Task currently holding this Capability.  This task has
    // exclusive access to the contents of this Capability (apart from
    // returning_tasks_hd/returning_tasks_tl).
    // Locks required: cap->lock.
    Task *running_task;

    // true if this Capability is running Haskell code, used for
    // catching unsafe call-ins.
    rtsBool in_haskell;

    // Has there been any activity on this Capability since the last GC?
    uint32_t idle;

    // NOTE(review): set when this Capability is taken out of use
    // (presumably via setNumCapabilities) -- confirm in Capability.c.
    rtsBool disabled;

    // The run queue.  The Task owning this Capability has exclusive
    // access to its run queue, so can wake up threads without
    // taking a lock, and the common path through the scheduler is
    // also lock-free.
    StgTSO *run_queue_hd;
    StgTSO *run_queue_tl;

    // Tasks currently making safe foreign calls.  Doubly-linked.
    // When returning, a task first acquires the Capability before
    // removing itself from this list, so that the GC can find all
    // the suspended TSOs easily.  Hence, when migrating a Task from
    // the returning_tasks list, we must also migrate its entry from
    // this list.
    InCall *suspended_ccalls;

    // One mutable list per generation, so we don't need to take any
    // locks when updating an old-generation thunk.  This also lets us
    // keep track of which closures this CPU has been mutating, so we
    // can traverse them using the right thread during GC and avoid
    // unnecessarily moving the data from one cache to another.
    bdescr **mut_lists;
    bdescr **saved_mut_lists; // tmp use during GC

    // block for allocating pinned objects into
    bdescr *pinned_object_block;
    // full pinned object blocks allocated since the last GC
    bdescr *pinned_object_blocks;

    // per-capability weak pointer list associated with nursery (older
    // lists stored in generation object)
    StgWeak *weak_ptr_list_hd;
    StgWeak *weak_ptr_list_tl;

    // Context switch flag.  When non-zero, this means: stop running
    // Haskell code, and switch threads.
    int context_switch;

    // Interrupt flag.  Like the context_switch flag, this also
    // indicates that we should stop running Haskell code, but we do
    // *not* switch threads.  This is used to stop a Capability in
    // order to do GC, for example.
    //
    // The interrupt flag is always reset before we start running
    // Haskell code, unlike the context_switch flag which is only
    // reset after we have executed the context switch.
    int interrupt;

    // Total words allocated by this cap since rts start
    // See [Note allocation accounting] in Storage.c
    W_ total_allocated;

#if defined(THREADED_RTS)
    // Worker Tasks waiting in the wings.  Singly-linked.
    Task *spare_workers;
    uint32_t n_spare_workers; // count of above

    // This lock protects:
    //    running_task
    //    returning_tasks_{hd,tl}
    //    wakeup_queue
    //    inbox
    Mutex lock;

    // Tasks waiting to return from a foreign call, or waiting to make
    // a new call-in using this Capability (NULL if empty).
    // NB. this field needs to be modified by tasks other than the
    // running_task, so it requires cap->lock to modify.  A task can
    // check whether it is NULL without taking the lock, however.
    Task *returning_tasks_hd; // Singly-linked, with head/tail
    Task *returning_tasks_tl;

    // Messages, or END_TSO_QUEUE.
    // Locks required: cap->lock
    Message *inbox;

    SparkPool *sparks;

    // Stats on spark creation/conversion
    SparkCounters spark_stats;
#if !defined(mingw32_HOST_OS)
    // IO manager for this cap
    int io_manager_control_wr_fd;
#endif
#endif

    // Per-capability STM-related data: free lists recycled between
    // transactions to avoid repeated allocation.
    StgTVarWatchQueue *free_tvar_watch_queues;
    StgInvariantCheckQueue *free_invariant_check_queues;
    StgTRecChunk *free_trec_chunks;
    StgTRecHeader *free_trec_headers;
    uint32_t transaction_tokens;
} // typedef Capability is defined in RtsAPI.h
  // We never want a Capability to overlap a cache line with anything
  // else, so round it up to a cache line size:
#ifndef mingw32_HOST_OS
ATTRIBUTE_ALIGNED(64)
#endif
;
152
153
#if defined(THREADED_RTS)
// Assert that 'task' is running on the current OS thread.
// Compiles to nothing in the non-threaded RTS.
#define ASSERT_TASK_ID(task) ASSERT(task->id == osThreadId())
#else
#define ASSERT_TASK_ID(task) /*empty*/
#endif

// These properties should be true when a Task is holding a Capability.
// NB. these are multi-statement macros (not do/while-wrapped): only use
// them as full statements, never as the unbraced body of an if/else.
#define ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task)                  \
  ASSERT(cap->running_task != NULL && cap->running_task == task);    \
  ASSERT(task->cap == cap);                                          \
  ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task)

// Sometimes a Task holds a Capability, but the Task is not associated
// with that Capability (ie. task->cap != cap).  This happens when
// (a) a Task holds multiple Capabilities, and (b) when the current
// Task is bound, its thread has just blocked, and it may have been
// moved to another Capability.
// (An empty run queue must have run_queue_tl == END_TSO_QUEUE too.)
#define ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task)               \
  ASSERT(cap->run_queue_hd == END_TSO_QUEUE ?                        \
            cap->run_queue_tl == END_TSO_QUEUE : 1);                 \
  ASSERT(myTask() == task);                                          \
  ASSERT_TASK_ID(task);
176
#if defined(THREADED_RTS)
// Sanity check over the spark counters (threaded RTS only).
// NOTE(review): presumably cross-checks the per-cap spark_stats totals
// against the pools -- confirm against the definition in Capability.c.
rtsBool checkSparkCountInvariant (void);
#endif
180
181 // Converts a *StgRegTable into a *Capability.
182 //
183 INLINE_HEADER Capability *
184 regTableToCapability (StgRegTable *reg)
185 {
186 return (Capability *)((void *)((unsigned char*)reg - STG_FIELD_OFFSET(Capability,r)));
187 }
188
// Initialise the available capabilities.
//
void initCapabilities (void);

// Add and initialise more Capabilities, numbered [from, to).
// NOTE(review): the half-open interpretation of (from, to) is inferred
// from the parameter names -- confirm against Capability.c.
void moreCapabilities (uint32_t from, uint32_t to);
196
197 // Release a capability. This is called by a Task that is exiting
198 // Haskell to make a foreign call, or in various other cases when we
199 // want to relinquish a Capability that we currently hold.
200 //
201 // ASSUMES: cap->running_task is the current Task.
202 //
203 #if defined(THREADED_RTS)
204 void releaseCapability (Capability* cap);
205 void releaseAndWakeupCapability (Capability* cap);
206 void releaseCapability_ (Capability* cap, rtsBool always_wakeup);
207 // assumes cap->lock is held
208 #else
209 // releaseCapability() is empty in non-threaded RTS
210 INLINE_HEADER void releaseCapability (Capability* cap STG_UNUSED) {};
211 INLINE_HEADER void releaseAndWakeupCapability (Capability* cap STG_UNUSED) {};
212 INLINE_HEADER void releaseCapability_ (Capability* cap STG_UNUSED,
213 rtsBool always_wakeup STG_UNUSED) {};
214 #endif
215
// declared in includes/rts/Threads.h:
// extern Capability MainCapability;

// declared in includes/rts/Threads.h:
// extern uint32_t n_capabilities;
// extern uint32_t enabled_capabilities;

// Array of pointers to all the capabilities.
// NOTE(review): presumably n_capabilities entries -- confirm in
// Capability.c.
extern Capability **capabilities;
226
//
// Types of global synchronisation
//
typedef enum {
    SYNC_OTHER,    // a synchronisation other than GC
    SYNC_GC_SEQ,   // sequential GC
    SYNC_GC_PAR    // parallel GC
} SyncType;
235
//
// Details about a global synchronisation
//
typedef struct {
    SyncType type;  // The kind of synchronisation
    rtsBool *idle;  // NOTE(review): looks like a per-capability array of
                    // idle flags consulted during the sync -- confirm
                    // against the users of pending_sync.
    Task *task;     // The Task performing the sync
} PendingSync;
244
//
// Indicates that the RTS wants to synchronise all the Capabilities
// for some reason.  All Capabilities should stop and return to the
// scheduler.
//
extern PendingSync * volatile pending_sync;

// Acquires a capability at a return point.  If *cap is non-NULL, then
// this is taken as a preference for the Capability we wish to
// acquire.
//
// OS threads waiting in this function get priority over those waiting
// in waitForCapability().
// NOTE(review): the line above is self-referential; the second mention
// presumably meant a different function (yieldCapability()?) -- confirm.
//
// On return, *cap is non-NULL, and points to the Capability acquired.
//
void waitForCapability (Capability **cap/*in/out*/, Task *task);

// Record closure 'p' on cap's mutable list for generation 'gen';
// definitions below in this header.
EXTERN_INLINE void recordMutableCap (const StgClosure *p, Capability *cap,
                                     uint32_t gen);

EXTERN_INLINE void recordClosureMutated (Capability *cap, StgClosure *p);
267
#if defined(THREADED_RTS)

// Gives up the current capability IFF there is a higher-priority
// thread waiting for it.  This happens in one of two ways:
//
//   (a) we are passing the capability to another OS thread, so
//       that it can run a bound Haskell thread, or
//
//   (b) there is an OS thread waiting to return from a foreign call
//
// On return: *pCap is NULL if the capability was released.  The
// current task should then re-acquire it using waitForCapability().
//
rtsBool yieldCapability (Capability** pCap, Task *task, rtsBool gcAllowed);

// Wakes up a worker thread on just one Capability, used when we
// need to service some global event.
//
void prodOneCapability (void);
void prodCapability (Capability *cap, Task *task);

// Similar to prodOneCapability(), but prods all of them.
//
void prodAllCapabilities (void);

// Attempt to gain control of a Capability if it is free.
//
rtsBool tryGrabCapability (Capability *cap, Task *task);

// Try to find a spark to run
//
StgClosure *findSpark (Capability *cap);

// True if any capabilities have sparks
//
rtsBool anySparks (void);

// Spark-pool accessors; definitions below in this header.
INLINE_HEADER rtsBool  emptySparkPoolCap (Capability *cap);
INLINE_HEADER uint32_t sparkPoolSizeCap  (Capability *cap);
INLINE_HEADER void     discardSparksCap  (Capability *cap);

#else // !THREADED_RTS

// Grab a capability.  (Only in the non-threaded RTS; in the threaded
// RTS one of the waitFor*Capability() functions must be used).
//
extern void grabCapability (Capability **pCap);

#endif /* !THREADED_RTS */
317
// Shut down all capabilities.
//
void shutdownCapabilities(Task *task, rtsBool wait_foreign);

// cause all capabilities to context switch as soon as possible.
void contextSwitchAllCapabilities(void);
INLINE_HEADER void contextSwitchCapability(Capability *cap);

// cause all capabilities to stop running Haskell code and return to
// the scheduler as soon as possible.
void interruptAllCapabilities(void);
INLINE_HEADER void interruptCapability(Capability *cap);

// Free all capabilities
void freeCapabilities (void);

// For the GC: apply 'evac' to the roots held by a Capability.
void markCapability (evac_fn evac, void *user, Capability *cap,
                     rtsBool no_mark_sparks USED_IF_THREADS);

void markCapabilities (evac_fn evac, void *user);

void traverseSparkQueues (evac_fn evac, void *user);

/* -----------------------------------------------------------------------------
   Messages
   -------------------------------------------------------------------------- */

#ifdef THREADED_RTS

INLINE_HEADER rtsBool emptyInbox(Capability *cap);

#endif // THREADED_RTS
352 /* -----------------------------------------------------------------------------
353 * INLINE functions... private below here
354 * -------------------------------------------------------------------------- */
355
356 EXTERN_INLINE void
357 recordMutableCap (const StgClosure *p, Capability *cap, uint32_t gen)
358 {
359 bdescr *bd;
360
361 // We must own this Capability in order to modify its mutable list.
362 // ASSERT(cap->running_task == myTask());
363 // NO: assertion is violated by performPendingThrowTos()
364 bd = cap->mut_lists[gen];
365 if (bd->free >= bd->start + BLOCK_SIZE_W) {
366 bdescr *new_bd;
367 new_bd = allocBlock_lock();
368 new_bd->link = bd;
369 bd = new_bd;
370 cap->mut_lists[gen] = bd;
371 }
372 *bd->free++ = (StgWord)p;
373 }
374
375 EXTERN_INLINE void
376 recordClosureMutated (Capability *cap, StgClosure *p)
377 {
378 bdescr *bd;
379 bd = Bdescr((StgPtr)p);
380 if (bd->gen_no != 0) recordMutableCap(p,cap,bd->gen_no);
381 }
382
383
#if defined(THREADED_RTS)
// Is this Capability's spark pool empty?
INLINE_HEADER rtsBool
emptySparkPoolCap (Capability *cap)
{
    return looksEmpty(cap->sparks);
}

// How many sparks does this Capability's pool currently hold?
INLINE_HEADER uint32_t
sparkPoolSizeCap (Capability *cap)
{
    return sparkPoolSize(cap->sparks);
}

// Throw away every spark in this Capability's pool.
INLINE_HEADER void
discardSparksCap (Capability *cap)
{
    discardSparks(cap->sparks);
}
#endif
397
INLINE_HEADER void
stopCapability (Capability *cap)
{
    // Setting HpLim to NULL tries to make the next heap check fail,
    // which will cause the thread to return to the scheduler.
    // It may not work - the thread might be updating HpLim itself
    // at the same time - so we also have the context_switch/interrupt
    // flags as a sticky way to tell the thread to stop.
    cap->r.rHpLim = NULL;
}
408
// Ask 'cap' to stop running Haskell code and return to the scheduler
// WITHOUT switching threads (sets the 'interrupt' flag; see the field
// comment in struct Capability_).
INLINE_HEADER void
interruptCapability (Capability *cap)
{
    stopCapability(cap);
    cap->interrupt = 1;
}
415
// Ask 'cap' to stop running Haskell code AND switch threads (sets the
// 'context_switch' flag; see the field comment in struct Capability_).
INLINE_HEADER void
contextSwitchCapability (Capability *cap)
{
    stopCapability(cap);
    cap->context_switch = 1;
}
422
#ifdef THREADED_RTS

// True if cap's message inbox holds no pending Messages.
INLINE_HEADER rtsBool emptyInbox(Capability *cap)
{
    // The inbox list is terminated by END_TSO_QUEUE rather than NULL.
    Message *head = cap->inbox;
    return (head == (Message*)END_TSO_QUEUE);
}

#endif
431
432 #include "EndPrivate.h"
433
434 #endif /* CAPABILITY_H */