/* ---------------------------------------------------------------------------
 * (c) The GHC Team, 2003-2006
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc. During
 * STG execution, a pointer to the capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities,
 * for non-threaded builds there is only one global capability, namely
 * MainCapability.
 * --------------------------------------------------------------------------*/
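/* A hedged illustration of the BaseReg arrangement described above
 * (sketch only; 'reg' is a hypothetical name for the register-table
 * pointer): since BaseReg points at cap->r rather than at the
 * Capability itself, code holding only the register table can recover
 * the enclosing Capability by subtracting the offset of the 'r' field:
 *
 *   StgRegTable *reg = ...;   // what BaseReg holds
 *   Capability *cap =
 *       (Capability *)((char *)reg - offsetof(Capability, r));
 */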
#include "PosixSource.h"
#include "Capability.h"
#include "sm/GC.h" // for gcWorkerThread()
// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
Capability MainCapability;

nat n_capabilities = 0;
Capability *capabilities = NULL;
// Holds the Capability which last became free. This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability = NULL;
/* GC indicator, in scope for the scheduler, init'ed to false */
volatile StgWord waiting_for_gc = 0;
/* Let foreign code get the current Capability -- assuming there is one!
 * This is useful for unsafe foreign calls because they are called with
 * the current Capability held, but they are not passed it. For example,
 * see the integer-gmp package which calls allocateLocal() in its
 * stgAllocForGMP() function (which gets called by gmp functions).
 */
Capability * rts_unsafeGetMyCapability (void)
{
#if defined(THREADED_RTS)
  return myTask()->cap;
#else
  return &MainCapability;
#endif
}
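/* A hedged usage sketch (illustrative, not a quote from integer-gmp):
 * an allocation hook called from foreign code, in the style of the
 * stgAllocForGMP() example above, might look roughly like
 *
 *   static void *
 *   myAllocHook (size_t size_in_bytes)    // hypothetical name
 *   {
 *       Capability *cap = rts_unsafeGetMyCapability();
 *       nat words = ROUNDUP_BYTES_TO_WDS(size_in_bytes);
 *       return (void *)allocateLocal(cap, words);
 *   }
 *
 * allocateLocal() allocates from the Capability's nursery, which is
 * why the current Capability must be in hand.
 */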
#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return blackholes_need_checking
        || sched_state >= SCHED_INTERRUPTING;
}
#endif
#if defined(THREADED_RTS)
StgClosure *
findSpark (Capability *cap)
{
    if (!emptyRunQueue(cap)) {
        // If there are other threads, don't try to run any new
        // sparks: sparks might be speculative, we don't want to take
        // resources away from the main computation.
        return NULL;
    }

    // first try to get a spark from our own pool.
    // We should be using reclaimSpark(), because it works without
    // needing any atomic instructions:
    //   spark = reclaimSpark(cap->sparks);
    // However, measurements show that this makes at least one benchmark
    // slower (prsa) and doesn't affect the others.
    spark = tryStealSpark(cap);
    if (spark != NULL) {
        cap->sparks_converted++;

        // Post event for running a spark from capability's own pool.
        postEvent(cap, EVENT_RUN_SPARK, cap->r.rCurrentTSO->id, 0);

        return spark;
    }
    if (n_capabilities == 1) { return NULL; } // makes no sense...

    debugTrace(DEBUG_sched,
               "cap %d: Trying to steal work from other capabilities",
               cap->no);

    /* visit capabilities 0..n-1 in sequence until a theft succeeds. We could
       start at a random place instead of 0 as well. */
    for ( i = 0 ; i < n_capabilities ; i++ ) {
        robbed = &capabilities[i];
        if (cap == robbed)  // ourselves...
            continue;

        if (emptySparkPoolCap(robbed)) // nothing to steal here
            continue;

        spark = tryStealSpark(robbed);
        if (spark == NULL && !emptySparkPoolCap(robbed)) {
            // we conflicted with another thread while trying to steal;
        }

        if (spark != NULL) {
            debugTrace(DEBUG_sched,
                       "cap %d: Stole a spark from capability %d",
                       cap->no, robbed->no);
            cap->sparks_converted++;

            postEvent(cap, EVENT_STEAL_SPARK,
                      cap->r.rCurrentTSO->id, robbed->no);

            return spark;
        }
        // otherwise: no success, try next one
    }

    debugTrace(DEBUG_sched, "No sparks stolen");
    return NULL;
// Returns True if any spark pool is non-empty at this moment in time.
// The result is only valid for an instant, of course, so in a sense
// is immediately invalid, and should not be relied upon for
// correctness.

    for (i = 0; i < n_capabilities; i++) {
        if (!emptySparkPoolCap(&capabilities[i])) {
            return rtsTrue;
        }
    }
    return rtsFalse;
#endif
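/* A hedged usage sketch (illustrative only; the real call sites are in
 * the scheduler, and the exact helper signature is an assumption here):
 *
 *   spark = findSpark(cap);
 *   if (spark != NULL) {
 *       // turn the spark into a runnable thread on this Capability
 *       createSparkThread(cap, spark);
 *   }
 */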
/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock
 * -------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->return_link == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->return_link == NULL);
        cap->returning_tasks_tl->return_link = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}
STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    ASSERT_LOCK_HELD(&cap->lock);
    Task *task;
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->return_link;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->return_link = NULL;
    return task;
}
#endif
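/* Hedged pairing sketch (inferred from the comments above and from
 * waitForReturnCapability() below): a returning task is appended at the
 * tail and later popped from the head, both under cap->lock:
 *
 *   ACQUIRE_LOCK(&cap->lock);
 *   newReturningTask(cap, task);   // FIFO enqueue at the tail
 *   RELEASE_LOCK(&cap->lock);
 *   // ... later, once this task is first in line ...
 *   popReturningTask(cap);         // dequeue from the head, under cap->lock
 */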
/* ----------------------------------------------------------------------------
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */
static void
initCapability( Capability *cap, nat i )
{
    nat g;

    cap->in_haskell        = rtsFalse;
    cap->in_gc             = rtsFalse;

    cap->run_queue_hd      = END_TSO_QUEUE;
    cap->run_queue_tl      = END_TSO_QUEUE;

#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task      = NULL; // indicates cap is free
    cap->spare_workers     = NULL;
    cap->suspended_ccalling_tasks = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->wakeup_queue_hd    = END_TSO_QUEUE;
    cap->wakeup_queue_tl    = END_TSO_QUEUE;
    cap->sparks_created     = 0;
    cap->sparks_converted   = 0;
    cap->sparks_pruned      = 0;
#endif

    cap->f.stgEagerBlackholeInfo = (W_)&__stg_EAGER_BLACKHOLE_info;
    cap->f.stgGCEnter1     = (StgFunPtr)__stg_gc_enter_1;
    cap->f.stgGCFun        = (StgFunPtr)__stg_gc_fun;

    cap->mut_lists  = stgMallocBytes(sizeof(bdescr *) *
                                     RtsFlags.GcFlags.generations,
                                     "initCapability");
    cap->saved_mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                     RtsFlags.GcFlags.generations,
                                     "initCapability");

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }

    cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks = END_STM_CHUNK_LIST;
    cap->free_trec_headers = NO_TREC;
    cap->transaction_tokens = 0;
    cap->context_switch = 0;
}
/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
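/* For example (the program name is illustrative), a program linked with
 * the threaded RTS chooses its Capability count on the command line:
 *
 *   $ ./myprog +RTS -N4 -RTS
 *
 * which sets RtsFlags.ParFlags.nNodes, and hence n_capabilities, to 4.
 */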
void
initCapabilities( void )
{
#if defined(THREADED_RTS)
    nat i;

#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
        errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
        RtsFlags.ParFlags.nNodes = 1;
    }
#endif

    n_capabilities = RtsFlags.ParFlags.nNodes;

    if (n_capabilities == 1) {
        capabilities = &MainCapability;
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
    } else {
        capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
                                      "initCapabilities");
    }

    for (i = 0; i < n_capabilities; i++) {
        initCapability(&capabilities[i], i);
    }

    debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);

#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif

    // There are no free capabilities to begin with. We will start
    // a worker Task to each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}
/* ----------------------------------------------------------------------------
 * setContextSwitches: cause all capabilities to context switch as
 * soon as possible.
 * ------------------------------------------------------------------------- */
void setContextSwitches(void)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        contextSwitchCapability(&capabilities[i]);
    }
}
/* ----------------------------------------------------------------------------
 * Give a Capability to a Task. The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    debugTrace(DEBUG_sched, "passing capability %d to %s %p",
               cap->no, task->tso ? "bound task" : "worker",
               (void *)task->id);
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because signalCondition() doesn't
    // flag the condition if the thread is already running, but we want
    // it to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
#endif
/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability. Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap,
                    rtsBool always_wakeup)
{
    Task *task;

    task = cap->running_task;

    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call..
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }

    if (waiting_for_gc == PENDING_GC_SEQ) {
        last_free_capability = cap; // needed?
        debugTrace(DEBUG_sched, "GC pending, set capability %d free", cap->no);
        return;
    }

    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        ASSERT(task != cap->run_queue_hd->bound);
        task = cap->run_queue_hd->bound;
        giveCapabilityToTask(cap,task);
        return;
    }

    if (!cap->spare_workers) {
        // Create a worker thread if we don't have one. If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed. If the system is
        // shutting down, we never create a new worker.
        if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
            debugTrace(DEBUG_sched,
                       "starting new worker on capability %d", cap->no);
            startWorkerTask(cap, workerStart);
            return;
        }
    }

    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (always_wakeup ||
        !emptyRunQueue(cap) || !emptyWakeupQueue(cap) ||
        !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue;
            return;
        }
    }

    last_free_capability = cap;
    debugTrace(DEBUG_sched, "freeing capability %d", cap->no);
}
void
releaseCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsFalse);
    RELEASE_LOCK(&cap->lock);
}
void
releaseAndWakeupCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsTrue);
    RELEASE_LOCK(&cap->lock);
}
static void
releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability. A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_worker queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    // Also, be careful to check that this task hasn't just exited
    // Haskell to do a foreign call (task->suspended_tso).
    if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
        task->next = cap->spare_workers;
        cap->spare_workers = task;
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap,rtsFalse);

    RELEASE_LOCK(&cap->lock);
}
#endif
/* ----------------------------------------------------------------------------
 * waitForReturnCapability( Task *task )
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
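/* A hedged sketch of the call pattern (the caller is
 * Schedule.c:resumeThread(), per the comment above; details here are
 * illustrative):
 *
 *   Capability *cap = task->cap;        // may no longer be free
 *   waitForReturnCapability(&cap, task);
 *   // on return: cap->running_task == task, and it is safe to
 *   // re-enter the RTS on this Capability.
 */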
void
waitForReturnCapability (Capability **pCap, Task *task)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            cap = NULL;
            for (i = 0; i < n_capabilities; i++) {
                if (!capabilities[i].running_task) {
                    cap = &capabilities[i];
                    break;
                }
            }
            if (cap == NULL) {
                // Can't find a free one, use last_free_capability.
                cap = last_free_capability;
            }
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;
    } else {
        ASSERT(task->cap == cap);
    }

    ACQUIRE_LOCK(&cap->lock);

    debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);

    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }
    }

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    debugTrace(DEBUG_sched, "resuming capability %d", cap->no);

    *pCap = cap;
#endif
}
#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */
void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

    if (waiting_for_gc == PENDING_GC_PAR) {
        debugTrace(DEBUG_sched, "capability %d: becoming a GC thread", cap->no);
        postEvent(cap, EVENT_GC_START, 0, 0);
        gcWorkerThread(cap);
        postEvent(cap, EVENT_GC_END, 0, 0);
        return;
    }

    debugTrace(DEBUG_sched, "giving up capability %d", cap->no);

    // We must now release the capability and wait to be woken up
    // again.
    task->wakeup = rtsFalse;
    releaseCapabilityAndQueueWorker(cap);

    for (;;) {
        ACQUIRE_LOCK(&task->lock);
        // task->lock held, cap->lock not held
        if (!task->wakeup) waitCondition(&task->cond, &task->lock);
        cap = task->cap;
        task->wakeup = rtsFalse;
        RELEASE_LOCK(&task->lock);

        debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);

        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task != NULL) {
            debugTrace(DEBUG_sched,
                       "capability %d is owned by another task", cap->no);
            RELEASE_LOCK(&cap->lock);
            continue;
        }

        if (task->tso == NULL) {
            ASSERT(cap->spare_workers != NULL);
            // if we're not at the front of the queue, release it
            // again. This is unlikely to happen.
            if (cap->spare_workers != task) {
                giveCapabilityToTask(cap,cap->spare_workers);
                RELEASE_LOCK(&cap->lock);
                continue;
            }
            cap->spare_workers = task->next;
            task->next = NULL;
        }

        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
        break;
    }

    debugTrace(DEBUG_sched, "resuming capability %d", cap->no);
    ASSERT(cap->running_task == task);

    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
}
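/* Hedged caller sketch: a worker with nothing to do typically loops on
 * yieldCapability() until it is handed a (possibly different)
 * Capability back. 'nothing_to_do()' is a hypothetical stand-in for
 * the scheduler's actual condition:
 *
 *   while (nothing_to_do(cap)) {
 *       yieldCapability(&cap, task);   // note: may update cap
 *   }
 */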
/* ----------------------------------------------------------------------------
 * Wake up a thread on a Capability.
 *
 * This is used when the current Task is running on a Capability and
 * wishes to wake up a thread on a different Capability.
 * ------------------------------------------------------------------------- */
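/* Hedged usage sketch (illustrative; the real call sites are elsewhere
 * in the RTS): from code running on my_cap, waking a TSO owned by some
 * other Capability:
 *
 *   if (tso->cap != my_cap) {
 *       wakeupThreadOnCapability(my_cap, tso->cap, tso);
 *   }
 */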
void
wakeupThreadOnCapability (Capability *my_cap,
                          Capability *other_cap,
                          StgTSO *tso)
{
    ACQUIRE_LOCK(&other_cap->lock);

    // ASSUMES: cap->lock is held (asserted in wakeupThreadOnCapability)
    if (tso->bound) {
        ASSERT(tso->bound->cap == tso->cap);
        tso->bound->cap = other_cap;
    }
    tso->cap = other_cap;

    ASSERT(tso->bound ? tso->bound->cap == other_cap : 1);

    if (other_cap->running_task == NULL) {
        // nobody is running this Capability, we can add our thread
        // directly onto the run queue and start up a Task to run it.

        other_cap->running_task = myTask();
            // precond for releaseCapability_() and appendToRunQueue()

        appendToRunQueue(other_cap,tso);

        releaseCapability_(other_cap,rtsFalse);
    } else {
        appendToWakeupQueue(my_cap,other_cap,tso);
        other_cap->context_switch = 1;
        // someone is running on this Capability, so it cannot be
        // freed without first checking the wakeup queue (see
        // releaseCapability_).
    }

    RELEASE_LOCK(&other_cap->lock);
}
/* ----------------------------------------------------------------------------
 * prodCapability
 *
 * If a Capability is currently idle, wake up a Task on it. Used to
 * get every Capability into the GC.
 * ------------------------------------------------------------------------- */
void
prodCapability (Capability *cap, Task *task)
{
    ACQUIRE_LOCK(&cap->lock);
    if (!cap->running_task) {
        cap->running_task = task;
        releaseCapability_(cap,rtsTrue);
    }
    RELEASE_LOCK(&cap->lock);
}
/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible. For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, thus any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller. Runnable threads are
 * killed.
 *
 * ------------------------------------------------------------------------- */
void
shutdownCapability (Capability *cap, Task *task, rtsBool safe)
{
    nat i;

    // Loop indefinitely until all the workers have exited and there
    // are no Haskell threads left. We used to bail out after 50
    // iterations of this loop, but that occasionally left a worker
    // running which caused problems later (the closeMutex() below
    // isn't safe, for one thing).

    for (i = 0; /* i < 50 */; i++) {
        ASSERT(sched_state == SCHED_SHUTTING_DOWN);

        debugTrace(DEBUG_sched,
                   "shutting down capability %d, attempt %d", cap->no, i);
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            debugTrace(DEBUG_sched, "not owner, yielding");
            yieldThread();
            continue;
        }
        cap->running_task = task;

        if (cap->spare_workers) {
            // Look for workers that have died without removing
            // themselves from the list; this could happen if the OS
            // summarily killed the thread, for example. This
            // actually happens on Windows when the system is
            // terminating the program, and the RTS is running in a
            // DLL.
            Task *t, *prev;
            prev = NULL;
            for (t = cap->spare_workers; t != NULL; t = t->next) {
                if (!osThreadIsAlive(t->id)) {
                    debugTrace(DEBUG_sched,
                               "worker thread %p has died unexpectedly", (void *)t->id);
                    if (!prev) {
                        cap->spare_workers = t->next;
                    } else {
                        prev->next = t->next;
                    }
                } else {
                    prev = t;
                }
            }
        }

        if (!emptyRunQueue(cap) || cap->spare_workers) {
            debugTrace(DEBUG_sched,
                       "runnable threads or workers still alive, yielding");
            releaseCapability_(cap,rtsFalse); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        // If "safe", then busy-wait for any threads currently doing
        // foreign calls. If we're about to unload this DLL, for
        // example, we need to be sure that there are no OS threads
        // that will try to return to code that has been unloaded.
        // We can be a bit more relaxed when this is a standalone
        // program that is about to terminate, and let safe=false.
        if (cap->suspended_ccalling_tasks && safe) {
            debugTrace(DEBUG_sched,
                       "thread(s) are involved in foreign calls, yielding");
            cap->running_task = NULL;
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        postEvent(cap, EVENT_SHUTDOWN, 0, 0);
        debugTrace(DEBUG_sched, "capability %d is stopped.", cap->no);
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // we now have the Capability; its run queue and spare workers
    // list are both empty.

    // ToDo: we can't drop this mutex, because there might still be
    // threads performing foreign calls that will eventually try to
    // return via resumeThread() and attempt to grab cap->lock.
    // closeMutex(&cap->lock);
}
/* ----------------------------------------------------------------------------
 * tryGrabCapability
 *
 * Attempt to gain control of a Capability if it is free.
 * ------------------------------------------------------------------------- */
rtsBool
tryGrabCapability (Capability *cap, Task *task)
{
    if (cap->running_task != NULL) return rtsFalse;
    ACQUIRE_LOCK(&cap->lock);
    if (cap->running_task != NULL) {
        RELEASE_LOCK(&cap->lock);
        return rtsFalse;
    }
    task->cap = cap;
    cap->running_task = task;
    RELEASE_LOCK(&cap->lock);
    return rtsTrue;
}

#endif /* THREADED_RTS */
static void
freeCapability (Capability *cap)
{
    stgFree(cap->mut_lists);
#if defined(THREADED_RTS)
    freeSparkPool(cap->sparks);
#endif
}
void
freeCapabilities (void)
{
#if defined(THREADED_RTS)
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        freeCapability(&capabilities[i]);
    }
#else
    freeCapability(&MainCapability);
#endif
}
/* ---------------------------------------------------------------------------
   Mark everything directly reachable from the Capabilities. When
   using multiple GC threads, each GC thread n (of N) marks all
   Capabilities c for which (c `mod` N == n).
   ------------------------------------------------------------------------ */
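/* Worked example (numbers are illustrative): with 6 Capabilities and
 * N = 4 GC threads, thread n = 1 calls markSomeCapabilities() with
 * i0 = 1, delta = 4 and therefore visits i = 1 and i = 5; threads 2
 * and 3 visit {2} and {3}, and thread 0 visits {0, 4}.
 */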
void
markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
                      rtsBool prune_sparks USED_IF_THREADS)
{
    nat i;
    Capability *cap;
    Task *task;

    // Each GC thread is responsible for following roots from the
    // Capability of the same number. There will usually be the same
    // or fewer Capabilities as GC threads, but just in case there
    // are more, we mark every Capability whose number is the GC
    // thread's index plus a multiple of the number of GC threads.
    for (i = i0; i < n_capabilities; i += delta) {
        cap = &capabilities[i];
        evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_tl);
#endif
        for (task = cap->suspended_ccalling_tasks; task != NULL;
             task = task->next) {
            debugTrace(DEBUG_sched,
                       "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
            evac(user, (StgClosure **)(void *)&task->suspended_tso);
        }

#if defined(THREADED_RTS)
        if (prune_sparks) {
            pruneSparkQueue (evac, user, cap);
        } else {
            traverseSparkQueue (evac, user, cap);
        }
#endif
    }

#if !defined(THREADED_RTS)
    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
    evac(user, (StgClosure **)(void *)&sleeping_queue);
#endif
}
void
markCapabilities (evac_fn evac, void *user)
{
    markSomeCapabilities(evac, user, 0, 1, rtsFalse);
}