Merge branch 'master' of darcs.haskell.org:/srv/darcs//ghc
[ghc.git] / rts / Capability.h
1 /* ---------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 2001-2006
4 *
5 * Capabilities
6 *
7 * For details on the high-level design, see
8 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Scheduler
9 *
10 * A Capability holds all the state an OS thread/task needs to run
11 * Haskell code: its STG registers, a pointer to its TSO, a nursery
12 * etc. During STG execution, a pointer to the Capability is kept in
13 * a register (BaseReg).
14 *
15 * Only in a THREADED_RTS build will there be multiple capabilities,
16 * in the non-threaded RTS there is one global capability, called
17 * MainCapability.
18 *
19 * --------------------------------------------------------------------------*/
20
21 #ifndef CAPABILITY_H
22 #define CAPABILITY_H
23
24 #include "sm/GC.h" // for evac_fn
25 #include "Task.h"
26 #include "Sparks.h"
27
28 #include "BeginPrivate.h"
29
// A Capability bundles everything one OS thread (Task) needs to run
// Haskell code: the STG register state, run queue, allocation areas,
// per-capability GC and STM data, and (in THREADED_RTS) the lock and
// queues used to hand the Capability between Tasks.
struct Capability_ {
    // State required by the STG virtual machine when running Haskell
    // code. During STG execution, the BaseReg register always points
    // to the StgRegTable of the current Capability (&cap->r).
    StgFunTable f;
    StgRegTable r;

    nat no;  // capability number.

    // The Task currently holding this Capability. This task has
    // exclusive access to the contents of this Capability (apart from
    // returning_tasks_hd/returning_tasks_tl).
    // Locks required: cap->lock.
    Task *running_task;

    // true if this Capability is running Haskell code, used for
    // catching unsafe call-ins.
    rtsBool in_haskell;

    // Has there been any activity on this Capability since the last GC?
    nat idle;

    // NOTE(review): presumably set when this Capability is taken out of
    // service (e.g. setNumCapabilities) — confirm against Capability.c.
    rtsBool disabled;

    // The run queue. The Task owning this Capability has exclusive
    // access to its run queue, so can wake up threads without
    // taking a lock, and the common path through the scheduler is
    // also lock-free.
    StgTSO *run_queue_hd;
    StgTSO *run_queue_tl;

    // Tasks currently making safe foreign calls. Doubly-linked.
    // When returning, a task first acquires the Capability before
    // removing itself from this list, so that the GC can find all
    // the suspended TSOs easily. Hence, when migrating a Task from
    // the returning_tasks list, we must also migrate its entry from
    // this list.
    InCall *suspended_ccalls;

    // One mutable list per generation, so we don't need to take any
    // locks when updating an old-generation thunk. This also lets us
    // keep track of which closures this CPU has been mutating, so we
    // can traverse them using the right thread during GC and avoid
    // unnecessarily moving the data from one cache to another.
    bdescr **mut_lists;
    bdescr **saved_mut_lists; // tmp use during GC

    // block for allocating pinned objects into
    bdescr *pinned_object_block;
    // full pinned object blocks allocated since the last GC
    bdescr *pinned_object_blocks;

    // Context switch flag. When non-zero, this means: stop running
    // Haskell code, and switch threads.
    int context_switch;

    // Interrupt flag. Like the context_switch flag, this also
    // indicates that we should stop running Haskell code, but we do
    // *not* switch threads. This is used to stop a Capability in
    // order to do GC, for example.
    //
    // The interrupt flag is always reset before we start running
    // Haskell code, unlike the context_switch flag which is only
    // reset after we have executed the context switch.
    int interrupt;

#if defined(THREADED_RTS)
    // Worker Tasks waiting in the wings. Singly-linked.
    Task *spare_workers;
    nat n_spare_workers; // count of above

    // This lock protects:
    //    running_task
    //    returning_tasks_{hd,tl}
    //    wakeup_queue
    //    inbox
    Mutex lock;

    // Tasks waiting to return from a foreign call, or waiting to make
    // a new call-in using this Capability (NULL if empty).
    // NB. this field needs to be modified by tasks other than the
    // running_task, so it requires cap->lock to modify.  A task can
    // check whether it is NULL without taking the lock, however.
    Task *returning_tasks_hd; // Singly-linked, with head/tail
    Task *returning_tasks_tl;

    // Messages, or END_TSO_QUEUE.
    // Locks required: cap->lock
    Message *inbox;

    SparkPool *sparks;

    // Stats on spark creation/conversion
    SparkCounters spark_stats;
#endif
    // Total words allocated by this cap since rts start
    W_ total_allocated;

    // Per-capability STM-related data
    StgTVarWatchQueue *free_tvar_watch_queues;
    StgInvariantCheckQueue *free_invariant_check_queues;
    StgTRecChunk *free_trec_chunks;
    StgTRecHeader *free_trec_headers;
    nat transaction_tokens;
} // typedef Capability is defined in RtsAPI.h
  // Capabilities are stored in an array, so make sure that adjacent
  // Capabilities don't share any cache-lines:
#ifndef mingw32_HOST_OS
  ATTRIBUTE_ALIGNED(64)
#endif
  ;
141
142
#if defined(THREADED_RTS)
// Asserts that 'task' is actually the Task running on the current OS
// thread, by comparing task->id with osThreadId().
#define ASSERT_TASK_ID(task) ASSERT(task->id == osThreadId())
#else
// Non-threaded RTS: there is only one OS thread, nothing to check.
#define ASSERT_TASK_ID(task) /*empty*/
#endif
148
// These properties should be true when a Task is holding a Capability.
//
// Each macro is wrapped in do { ... } while (0) so that it expands to
// exactly one statement: the previous multi-statement expansion (and
// the trailing ';' in ASSERT_PARTIAL_CAPABILITY_INVARIANTS) was unsafe
// inside an unbraced if/else.  Call sites must supply the trailing
// semicolon, as is the convention.
#define ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task)                  \
  do {                                                               \
    ASSERT(cap->running_task != NULL && cap->running_task == task);  \
    ASSERT(task->cap == cap);                                        \
    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);                  \
  } while (0)

// Sometimes a Task holds a Capability, but the Task is not associated
// with that Capability (ie. task->cap != cap). This happens when
// (a) a Task holds multiple Capabilities, and (b) when the current
// Task is bound, its thread has just blocked, and it may have been
// moved to another Capability.
#define ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task)               \
  do {                                                               \
    /* an empty run queue has both head and tail == END_TSO_QUEUE */ \
    ASSERT(cap->run_queue_hd == END_TSO_QUEUE ?                      \
           cap->run_queue_tl == END_TSO_QUEUE : 1);                  \
    ASSERT(myTask() == task);                                        \
    ASSERT_TASK_ID(task);                                            \
  } while (0)
165
#if defined(THREADED_RTS)
// Sanity check over the per-capability spark counters; returns rtsTrue
// if the invariant holds.  NOTE(review): exact invariant is defined in
// the implementation (Capability.c) — confirm there.
rtsBool checkSparkCountInvariant (void);
#endif
169
170 // Converts a *StgRegTable into a *Capability.
171 //
172 INLINE_HEADER Capability *
173 regTableToCapability (StgRegTable *reg)
174 {
175 return (Capability *)((void *)((unsigned char*)reg - STG_FIELD_OFFSET(Capability,r)));
176 }
177
// Initialise the available capabilities.
//
void initCapabilities (void);

// Add and initialise more Capabilities, growing the capability count
// from 'from' to 'to'.
//
Capability * moreCapabilities (nat from, nat to);
186 // Release a capability. This is called by a Task that is exiting
187 // Haskell to make a foreign call, or in various other cases when we
188 // want to relinquish a Capability that we currently hold.
189 //
190 // ASSUMES: cap->running_task is the current Task.
191 //
192 #if defined(THREADED_RTS)
193 void releaseCapability (Capability* cap);
194 void releaseAndWakeupCapability (Capability* cap);
195 void releaseCapability_ (Capability* cap, rtsBool always_wakeup);
196 // assumes cap->lock is held
197 #else
198 // releaseCapability() is empty in non-threaded RTS
199 INLINE_HEADER void releaseCapability (Capability* cap STG_UNUSED) {};
200 INLINE_HEADER void releaseAndWakeupCapability (Capability* cap STG_UNUSED) {};
201 INLINE_HEADER void releaseCapability_ (Capability* cap STG_UNUSED,
202 rtsBool always_wakeup STG_UNUSED) {};
203 #endif

// declared in includes/rts/Threads.h:
// extern Capability MainCapability;

// declared in includes/rts/Threads.h:
// extern nat n_capabilities;
// extern nat enabled_capabilities;

// Array of all the capabilities
//
extern Capability *capabilities;

// The Capability that was last free.  Used as a good guess for where
// to assign new threads.
//
extern Capability *last_free_capability;

//
// Indicates that the RTS wants to synchronise all the Capabilities
// for some reason.  All Capabilities should stop and return to the
// scheduler.  pending_sync holds one of the SYNC_* values below, or
// zero when no sync is requested.
//
#define SYNC_GC_SEQ  1  // sequential GC
#define SYNC_GC_PAR  2  // parallel GC
#define SYNC_OTHER   3  // any other reason
extern volatile StgWord pending_sync;

// Acquires a capability at a return point.  If *cap is non-NULL, then
// this is taken as a preference for the Capability we wish to
// acquire.
//
// OS threads waiting in this function get priority over those waiting
// in waitForCapability().
//
// On return, *cap is non-NULL, and points to the Capability acquired.
//
void waitForReturnCapability (Capability **cap/*in/out*/, Task *task);

// Record closure p on cap's mutable list for generation gen.
// (Defined inline below.)
EXTERN_INLINE void recordMutableCap (StgClosure *p, Capability *cap, nat gen);

// Record p on the mutable list iff it lives outside generation 0.
// (Defined inline below.)
EXTERN_INLINE void recordClosureMutated (Capability *cap, StgClosure *p);
#if defined(THREADED_RTS)

// Gives up the current capability IFF there is a higher-priority
// thread waiting for it.  This happens in one of two ways:
//
//   (a) we are passing the capability to another OS thread, so
//       that it can run a bound Haskell thread, or
//
//   (b) there is an OS thread waiting to return from a foreign call
//
// On return: *pCap is NULL if the capability was released.  The
// current task should then re-acquire it using waitForCapability().
//
rtsBool yieldCapability (Capability** pCap, Task *task, rtsBool gcAllowed);

// Acquires a capability for doing some work.
//
// On return: pCap points to the capability.
//
void waitForCapability (Task *task, Mutex *mutex, Capability **pCap);

// Wakes up a worker thread on just one Capability, used when we
// need to service some global event.
//
void prodOneCapability (void);
void prodCapability (Capability *cap, Task *task);

// Similar to prodOneCapability(), but prods all of them.
//
void prodAllCapabilities (void);

// Attempt to gain control of a Capability if it is free.
//
rtsBool tryGrabCapability (Capability *cap, Task *task);

// Try to find a spark to run
//
StgClosure *findSpark (Capability *cap);

// True if any capabilities have sparks
//
rtsBool anySparks (void);

// Spark-pool accessors; defined inline below.
INLINE_HEADER rtsBool emptySparkPoolCap (Capability *cap);
INLINE_HEADER nat     sparkPoolSizeCap  (Capability *cap);
INLINE_HEADER void    discardSparksCap  (Capability *cap);

#else // !THREADED_RTS

// Grab a capability.  (Only in the non-threaded RTS; in the threaded
// RTS one of the waitFor*Capability() functions must be used).
//
extern void grabCapability (Capability **pCap);

#endif /* !THREADED_RTS */

// Waits for a capability to drain of runnable threads and workers,
// and then acquires it.  Used at shutdown time.
//
void shutdownCapability (Capability *cap, Task *task, rtsBool wait_foreign);

// Shut down all capabilities.
//
void shutdownCapabilities(Task *task, rtsBool wait_foreign);

// cause all capabilities to context switch as soon as possible.
void contextSwitchAllCapabilities(void);
INLINE_HEADER void contextSwitchCapability(Capability *cap);

// cause all capabilities to stop running Haskell code and return to
// the scheduler as soon as possible.
void interruptAllCapabilities(void);
INLINE_HEADER void interruptCapability(Capability *cap);

// Free all capabilities
void freeCapabilities (void);

// For the GC: evacuate all roots held by a single Capability.
void markCapability (evac_fn evac, void *user, Capability *cap,
                     rtsBool no_mark_sparks USED_IF_THREADS);

// For the GC: evacuate the roots of every Capability.
void markCapabilities (evac_fn evac, void *user);

// For the GC: traverse each Capability's spark queue with evac.
void traverseSparkQueues (evac_fn evac, void *user);
330
/* -----------------------------------------------------------------------------
   Messages
   -------------------------------------------------------------------------- */

#ifdef THREADED_RTS

// True if cap's message inbox is empty; defined inline below.
INLINE_HEADER rtsBool emptyInbox(Capability *cap);

#endif // THREADED_RTS
340
341 /* -----------------------------------------------------------------------------
342 * INLINE functions... private below here
343 * -------------------------------------------------------------------------- */
344
345 EXTERN_INLINE void
346 recordMutableCap (StgClosure *p, Capability *cap, nat gen)
347 {
348 bdescr *bd;
349
350 // We must own this Capability in order to modify its mutable list.
351 // ASSERT(cap->running_task == myTask());
352 // NO: assertion is violated by performPendingThrowTos()
353 bd = cap->mut_lists[gen];
354 if (bd->free >= bd->start + BLOCK_SIZE_W) {
355 bdescr *new_bd;
356 new_bd = allocBlock_lock();
357 new_bd->link = bd;
358 bd = new_bd;
359 cap->mut_lists[gen] = bd;
360 }
361 *bd->free++ = (StgWord)p;
362 }
363
364 EXTERN_INLINE void
365 recordClosureMutated (Capability *cap, StgClosure *p)
366 {
367 bdescr *bd;
368 bd = Bdescr((StgPtr)p);
369 if (bd->gen_no != 0) recordMutableCap(p,cap,bd->gen_no);
370 }
371
372
#if defined(THREADED_RTS)
// Does cap's spark pool currently look empty?  (Delegates to looksEmpty.)
INLINE_HEADER rtsBool
emptySparkPoolCap (Capability *cap)
{
    return looksEmpty(cap->sparks);
}

// Size of cap's spark pool.  (Delegates to sparkPoolSize.)
INLINE_HEADER nat
sparkPoolSizeCap (Capability *cap)
{
    return sparkPoolSize(cap->sparks);
}

// Throw away all sparks in cap's pool.  (Delegates to discardSparks.)
INLINE_HEADER void
discardSparksCap (Capability *cap)
{
    discardSparks(cap->sparks);
}
#endif
386
INLINE_HEADER void
stopCapability (Capability *cap)
{
    // Setting HpLim to NULL makes the next heap check fail, which
    // causes the thread to return to the scheduler.
    // It may not work - the thread might be updating HpLim itself
    // at the same time - so we also have the context_switch/interrupted
    // flags as a sticky way to tell the thread to stop.
    cap->r.rHpLim = NULL;
}
397
INLINE_HEADER void
interruptCapability (Capability *cap)
{
    // Ask cap to stop running Haskell code WITHOUT switching threads:
    // knock out HpLim (best-effort) and set the sticky interrupt flag.
    stopCapability(cap);
    cap->interrupt = 1;
}
404
INLINE_HEADER void
contextSwitchCapability (Capability *cap)
{
    // Ask cap to stop running Haskell code AND switch threads:
    // knock out HpLim (best-effort) and set the sticky flag.
    stopCapability(cap);
    cap->context_switch = 1;
}
411
#ifdef THREADED_RTS

// True if cap's message inbox holds no messages.  Note that the inbox
// is terminated with END_TSO_QUEUE, not NULL.
INLINE_HEADER rtsBool emptyInbox(Capability *cap)
{
    return (cap->inbox == (Message*)END_TSO_QUEUE);
}

#endif
420
421 #include "EndPrivate.h"
422
423 #endif /* CAPABILITY_H */