Allow the number of capabilities to be increased at runtime (#3729)
[ghc.git] / rts / Capability.h
1 /* ---------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 2001-2006
4 *
5 * Capabilities
6 *
7 * For details on the high-level design, see
8 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Scheduler
9 *
10 * A Capability holds all the state an OS thread/task needs to run
11 * Haskell code: its STG registers, a pointer to its TSO, a nursery
12 * etc. During STG execution, a pointer to the Capabilitity is kept in
13 * a register (BaseReg).
14 *
15 * Only in a THREADED_RTS build will there be multiple capabilities,
16 * in the non-threaded RTS there is one global capability, called
17 * MainCapability.
18 *
19 * --------------------------------------------------------------------------*/
20
21 #ifndef CAPABILITY_H
22 #define CAPABILITY_H
23
24 #include "sm/GC.h" // for evac_fn
25 #include "Task.h"
26 #include "Sparks.h"
27
28 #include "BeginPrivate.h"
29
struct Capability_ {
    // State required by the STG virtual machine when running Haskell
    // code. During STG execution, the BaseReg register always points
    // to the StgRegTable of the current Capability (&cap->r).
    StgFunTable f;
    StgRegTable r;

    nat no;  // capability number.

    // The Task currently holding this Capability.  This task has
    // exclusive access to the contents of this Capability (apart from
    // returning_tasks_hd/returning_tasks_tl).
    // Locks required: cap->lock.
    Task *running_task;

    // true if this Capability is running Haskell code, used for
    // catching unsafe call-ins.
    rtsBool in_haskell;

    // The run queue.  The Task owning this Capability has exclusive
    // access to its run queue, so can wake up threads without
    // taking a lock, and the common path through the scheduler is
    // also lock-free.
    StgTSO *run_queue_hd;
    StgTSO *run_queue_tl;

    // Tasks currently making safe foreign calls.  Doubly-linked.
    // When returning, a task first acquires the Capability before
    // removing itself from this list, so that the GC can find all
    // the suspended TSOs easily.  Hence, when migrating a Task from
    // the returning_tasks list, we must also migrate its entry from
    // this list.
    InCall *suspended_ccalls;

    // One mutable list per generation, so we don't need to take any
    // locks when updating an old-generation thunk.  This also lets us
    // keep track of which closures this CPU has been mutating, so we
    // can traverse them using the right thread during GC and avoid
    // unnecessarily moving the data from one cache to another.
    bdescr **mut_lists;
    bdescr **saved_mut_lists; // tmp use during GC

    // block for allocating pinned objects into
    bdescr *pinned_object_block;

    // Context switch flag.  When non-zero, this means: stop running
    // Haskell code, and switch threads.
    int context_switch;

    // Interrupt flag.  Like the context_switch flag, this also
    // indicates that we should stop running Haskell code, but we do
    // *not* switch threads.  This is used to stop a Capability in
    // order to do GC, for example.
    //
    // The interrupt flag is always reset before we start running
    // Haskell code, unlike the context_switch flag which is only
    // reset after we have executed the context switch.
    int interrupt;

#if defined(THREADED_RTS)
    // Worker Tasks waiting in the wings.  Singly-linked.
    Task *spare_workers;
    nat n_spare_workers; // count of above

    // This lock protects running_task, returning_tasks_{hd,tl}, wakeup_queue.
    Mutex lock;

    // Tasks waiting to return from a foreign call, or waiting to make
    // a new call-in using this Capability (NULL if empty).
    // NB. this field needs to be modified by tasks other than the
    // running_task, so it requires cap->lock to modify.  A task can
    // check whether it is NULL without taking the lock, however.
    Task *returning_tasks_hd; // Singly-linked, with head/tail
    Task *returning_tasks_tl;

    // Messages, or END_TSO_QUEUE.
    Message *inbox;

    // This Capability's pool of sparks (see Sparks.h).
    SparkPool *sparks;

    // Stats on spark creation/conversion
    SparkCounters spark_stats;
#endif

    // Per-capability STM-related data: free lists recycled between
    // transactions to avoid repeated allocation.
    StgTVarWatchQueue *free_tvar_watch_queues;
    StgInvariantCheckQueue *free_invariant_check_queues;
    StgTRecChunk *free_trec_chunks;
    StgTRecHeader *free_trec_headers;
    nat transaction_tokens;
} // typedef Capability is defined in RtsAPI.h
  // Capabilities are stored in an array, so make sure that adjacent
  // Capabilities don't share any cache-lines:
#ifndef mingw32_HOST_OS
  ATTRIBUTE_ALIGNED(64)
#endif
  ;
127
128
// Checks that 'task' is running on the current OS thread (threaded
// RTS only; compiles to nothing otherwise).
#if defined(THREADED_RTS)
#define ASSERT_TASK_ID(task) ASSERT(task->id == osThreadId())
#else
#define ASSERT_TASK_ID(task) /*empty*/
#endif

// These properties should be true when a Task is holding a Capability
#define ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task)                 \
  ASSERT(cap->running_task != NULL && cap->running_task == task);   \
  ASSERT(task->cap == cap);                                         \
  ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task)

// Sometimes a Task holds a Capability, but the Task is not associated
// with that Capability (ie. task->cap != cap). This happens when
// (a) a Task holds multiple Capabilities, and (b) when the current
// Task is bound, its thread has just blocked, and it may have been
// moved to another Capability.
// (Invariant below: an empty run-queue head implies an empty tail.)
#define ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task)  \
  ASSERT(cap->run_queue_hd == END_TSO_QUEUE ?           \
            cap->run_queue_tl == END_TSO_QUEUE : 1);    \
  ASSERT(myTask() == task);                             \
  ASSERT_TASK_ID(task);

#if defined(THREADED_RTS)
rtsBool checkSparkCountInvariant (void);
#endif
155
156 // Converts a *StgRegTable into a *Capability.
157 //
158 INLINE_HEADER Capability *
159 regTableToCapability (StgRegTable *reg)
160 {
161 return (Capability *)((void *)((unsigned char*)reg - STG_FIELD_OFFSET(Capability,r)));
162 }
163
// Initialise the available capabilities.
//
void initCapabilities (void);

// Add and initialise more Capabilities
// (presumably grows the capabilities array from 'from' entries to
// 'to' entries at runtime — see Capability.c for the contract).
//
Capability * moreCapabilities (nat from, nat to);
171
172 // Release a capability. This is called by a Task that is exiting
173 // Haskell to make a foreign call, or in various other cases when we
174 // want to relinquish a Capability that we currently hold.
175 //
176 // ASSUMES: cap->running_task is the current Task.
177 //
178 #if defined(THREADED_RTS)
179 void releaseCapability (Capability* cap);
180 void releaseAndWakeupCapability (Capability* cap);
181 void releaseCapability_ (Capability* cap, rtsBool always_wakeup);
182 // assumes cap->lock is held
183 #else
184 // releaseCapability() is empty in non-threaded RTS
185 INLINE_HEADER void releaseCapability (Capability* cap STG_UNUSED) {};
186 INLINE_HEADER void releaseAndWakeupCapability (Capability* cap STG_UNUSED) {};
187 INLINE_HEADER void releaseCapability_ (Capability* cap STG_UNUSED,
188 rtsBool always_wakeup STG_UNUSED) {};
189 #endif
190
// declared in includes/rts/Threads.h:
// extern Capability MainCapability;

// declared in includes/rts/Threads.h:
// extern nat n_capabilities;

// Array of all the capabilities
//
extern Capability *capabilities;

// The Capability that was last free.  Used as a good guess for where
// to assign new threads.
//
extern Capability *last_free_capability;

//
// Indicates that the RTS wants to synchronise all the Capabilities
// for some reason.  All Capabilities should stop and return to the
// scheduler.
//
#define SYNC_GC_SEQ 1
#define SYNC_GC_PAR 2
#define SYNC_OTHER 3
extern volatile StgWord pending_sync;

// Acquires a capability at a return point.  If *cap is non-NULL, then
// this is taken as a preference for the Capability we wish to
// acquire.
//
// OS threads waiting in this function get priority over those waiting
// in waitForCapability().
//
// On return, *cap is non-NULL, and points to the Capability acquired.
//
void waitForReturnCapability (Capability **cap/*in/out*/, Task *task);

// Record closure 'p' on cap's mutable list for generation 'gen'
// (defined inline at the bottom of this file).
EXTERN_INLINE void recordMutableCap (StgClosure *p, Capability *cap, nat gen);

// As recordMutableCap, but looks up the closure's generation itself
// and skips young-generation closures (defined inline below).
EXTERN_INLINE void recordClosureMutated (Capability *cap, StgClosure *p);
230
#if defined(THREADED_RTS)

// Gives up the current capability IFF there is a higher-priority
// thread waiting for it.  This happens in one of two ways:
//
//   (a) we are passing the capability to another OS thread, so
//       that it can run a bound Haskell thread, or
//
//   (b) there is an OS thread waiting to return from a foreign call
//
// On return: *pCap is NULL if the capability was released.  The
// current task should then re-acquire it using waitForCapability().
//
void yieldCapability (Capability** pCap, Task *task);

// Acquires a capability for doing some work.
//
// On return: pCap points to the capability.
//
void waitForCapability (Task *task, Mutex *mutex, Capability **pCap);

// Wakes up a worker thread on just one Capability, used when we
// need to service some global event.
//
void prodOneCapability (void);
void prodCapability (Capability *cap, Task *task);

// Similar to prodOneCapability(), but prods all of them.
//
void prodAllCapabilities (void);

// Attempt to gain control of a Capability if it is free.
//
rtsBool tryGrabCapability (Capability *cap, Task *task);

// Try to find a spark to run
//
StgClosure *findSpark (Capability *cap);

// True if any capabilities have sparks
//
rtsBool anySparks (void);

// Inline spark-pool accessors, defined at the bottom of this file.
INLINE_HEADER rtsBool emptySparkPoolCap (Capability *cap);
INLINE_HEADER nat     sparkPoolSizeCap  (Capability *cap);
INLINE_HEADER void    discardSparksCap  (Capability *cap);

#else // !THREADED_RTS

// Grab a capability.  (Only in the non-threaded RTS; in the threaded
// RTS one of the waitFor*Capability() functions must be used).
//
extern void grabCapability (Capability **pCap);

#endif /* !THREADED_RTS */
286
// Waits for a capability to drain of runnable threads and workers,
// and then acquires it.  Used at shutdown time.
//
void shutdownCapability (Capability *cap, Task *task, rtsBool wait_foreign);

// Shut down all capabilities.
//
void shutdownCapabilities(Task *task, rtsBool wait_foreign);

// cause all capabilities to context switch as soon as possible.
void contextSwitchAllCapabilities(void);
INLINE_HEADER void contextSwitchCapability(Capability *cap);

// cause all capabilities to stop running Haskell code and return to
// the scheduler as soon as possible.
void interruptAllCapabilities(void);
INLINE_HEADER void interruptCapability(Capability *cap);

// Free all capabilities
void freeCapabilities (void);

// For the GC: evacuate the roots held by one Capability.
void markCapability (evac_fn evac, void *user, Capability *cap,
                     rtsBool no_mark_sparks USED_IF_THREADS);

// For the GC: evacuate the roots held by every Capability.
void markCapabilities (evac_fn evac, void *user);

void traverseSparkQueues (evac_fn evac, void *user);
315
/* -----------------------------------------------------------------------------
   Messages
   -------------------------------------------------------------------------- */

#ifdef THREADED_RTS

// True if this Capability's message inbox is empty (defined inline at
// the bottom of this file).
// (Fixed: the prototype previously ended with a doubled ';;' — a
// stray file-scope semicolon is not valid ISO C.)
INLINE_HEADER rtsBool emptyInbox(Capability *cap);

#endif // THREADED_RTS
325
326 /* -----------------------------------------------------------------------------
327 * INLINE functions... private below here
328 * -------------------------------------------------------------------------- */
329
330 EXTERN_INLINE void
331 recordMutableCap (StgClosure *p, Capability *cap, nat gen)
332 {
333 bdescr *bd;
334
335 // We must own this Capability in order to modify its mutable list.
336 // ASSERT(cap->running_task == myTask());
337 // NO: assertion is violated by performPendingThrowTos()
338 bd = cap->mut_lists[gen];
339 if (bd->free >= bd->start + BLOCK_SIZE_W) {
340 bdescr *new_bd;
341 new_bd = allocBlock_lock();
342 new_bd->link = bd;
343 bd = new_bd;
344 cap->mut_lists[gen] = bd;
345 }
346 *bd->free++ = (StgWord)p;
347 }
348
349 EXTERN_INLINE void
350 recordClosureMutated (Capability *cap, StgClosure *p)
351 {
352 bdescr *bd;
353 bd = Bdescr((StgPtr)p);
354 if (bd->gen_no != 0) recordMutableCap(p,cap,bd->gen_no);
355 }
356
357
#if defined(THREADED_RTS)
// Thin wrapper: does this Capability's spark pool look empty?
INLINE_HEADER rtsBool
emptySparkPoolCap (Capability *cap)
{ return looksEmpty(cap->sparks); }

// Thin wrapper: number of sparks in this Capability's pool.
INLINE_HEADER nat
sparkPoolSizeCap (Capability *cap)
{ return sparkPoolSize(cap->sparks); }

// Thin wrapper: discard all sparks in this Capability's pool.
INLINE_HEADER void
discardSparksCap (Capability *cap)
{ discardSparks(cap->sparks); }
#endif
371
// Ask the Capability to stop running Haskell code soon, by spoiling
// its heap-check limit.
INLINE_HEADER void
stopCapability (Capability *cap)
{
    // Setting HpLim to NULL tries to make the next heap check fail,
    // which will cause the thread to return to the scheduler.
    // It may not work - the thread might be updating HpLim itself
    // at the same time - so we also have the context_switch/interrupted
    // flags as a sticky way to tell the thread to stop.
    cap->r.rHpLim = NULL;
}
382
// Stop the Capability and set its interrupt flag: return to the
// scheduler without switching threads (e.g. so a GC can proceed).
INLINE_HEADER void
interruptCapability (Capability *cap)
{
    stopCapability(cap);
    cap->interrupt = 1;
}
389
// Stop the Capability and set its context-switch flag: return to the
// scheduler and switch to another thread.
INLINE_HEADER void
contextSwitchCapability (Capability *cap)
{
    stopCapability(cap);
    cap->context_switch = 1;
}
396
#ifdef THREADED_RTS

// True if this Capability's message inbox holds no messages
// (the inbox list is terminated by END_TSO_QUEUE rather than NULL).
INLINE_HEADER rtsBool emptyInbox(Capability *cap)
{
    return (cap->inbox == (Message*)END_TSO_QUEUE);
}

#endif
405
406 #include "EndPrivate.h"
407
408 #endif /* CAPABILITY_H */