/* ---------------------------------------------------------------------------
 * (c) The GHC Team, 2003-2006
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc. During
 * STG execution, a pointer to the capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities,
 * for non-threaded builds there is only one global capability, namely
 * MainCapability.
 *
 * --------------------------------------------------------------------------*/
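/* Illustrative sketch (not from this file): because BaseReg points at
 * cap->r rather than at the Capability itself, code holding only the
 * register table can recover its Capability by subtracting the offset
 * of the r field, along the lines of:
 *
 *     Capability *cap =
 *         (Capability *)((char *)base_reg - offsetof(Capability, r));
 *
 * where base_reg is a hypothetical variable holding the BaseReg value.
 */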
#include "PosixSource.h"
#include "OSThreads.h"
#include "Capability.h"
// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
Capability MainCapability;
Capability *capabilities = NULL;
// Holds the Capability which last became free. This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability;
/* GC indicator, in scope for the scheduler; initialised to false */
volatile StgWord waiting_for_gc = 0;
#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return blackholes_need_checking
        || sched_state >= SCHED_INTERRUPTING;
}
#endif
#if defined(THREADED_RTS)
StgClosure *
findSpark (Capability *cap)
{
    Capability *robbed;
    StgClosure *spark;
    rtsBool retry;
    nat i;

    if (!emptyRunQueue(cap)) {
        // If there are other threads, don't try to run any new
        // sparks: sparks might be speculative, we don't want to take
        // resources away from the main computation.
        return NULL;
    }
    // First try to get a spark from our own pool.
    // We should be using reclaimSpark(), because it works without
    // needing any atomic instructions:
    //   spark = reclaimSpark(cap->sparks);
    // However, measurements show that this makes at least one benchmark
    // slower (prsa) and doesn't affect the others.
    spark = tryStealSpark(cap);
    if (spark != NULL) {
        cap->sparks_converted++;
        return spark;
    }
    if (n_capabilities == 1) { return NULL; } // makes no sense...
    debugTrace(DEBUG_sched,
               "cap %d: Trying to steal work from other capabilities",
               cap->no);

    do {
        retry = rtsFalse;
        /* Visit caps 0..n-1 in sequence until a theft succeeds. We could
           start at a random place instead of 0 as well. */
        for ( i = 0; i < n_capabilities; i++ ) {
            robbed = &capabilities[i];
            if (cap == robbed)  // ourselves...
                continue;

            if (emptySparkPoolCap(robbed)) // nothing to steal here
                continue;
            spark = tryStealSpark(robbed);
            if (spark == NULL && !emptySparkPoolCap(robbed)) {
                // we conflicted with another thread while trying to steal;
                // try again later.
                retry = rtsTrue;
            }
            if (spark != NULL) {
                debugTrace(DEBUG_sched,
                           "cap %d: Stole a spark from capability %d",
                           cap->no, robbed->no);
                cap->sparks_converted++;
                return spark;
            }
            // otherwise: no success, try next one
        }
    } while (retry);
    debugTrace(DEBUG_sched, "No sparks stolen");
    return NULL;
}
// Returns True if any spark pool is non-empty at this moment in time.
// The result is only valid for an instant, of course, so in a sense
// is immediately invalid, and should not be relied upon for
// correctness.
rtsBool
anySparks (void)
{
    nat i;

    for (i = 0; i < n_capabilities; i++) {
        if (!emptySparkPoolCap(&capabilities[i])) {
            return rtsTrue;
        }
    }
    return rtsFalse;
}
#endif
/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock.
 * -------------------------------------------------------------------------- */
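/* Descriptive note (added for clarity): returning_tasks is a
 * singly-linked FIFO threaded through Task.return_link.
 * returning_tasks_hd is the oldest returning Task, returning_tasks_tl
 * the newest, and the tail's return_link is always NULL.
 */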
#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->return_link == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->return_link == NULL);
        cap->returning_tasks_tl->return_link = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}
STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    Task *task;
    ASSERT_LOCK_HELD(&cap->lock);
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->return_link;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->return_link = NULL;
    return task;
}
#endif
/* ----------------------------------------------------------------------------
 * initCapability
 *
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */
static void
initCapability( Capability *cap, nat i )
{
    nat g;

    cap->no = i;
    cap->in_haskell   = rtsFalse;
    cap->in_gc        = rtsFalse;

    cap->run_queue_hd = END_TSO_QUEUE;
    cap->run_queue_tl = END_TSO_QUEUE;
#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task       = NULL; // indicates cap is free
    cap->spare_workers      = NULL;
    cap->suspended_ccalling_tasks = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->wakeup_queue_hd    = END_TSO_QUEUE;
    cap->wakeup_queue_tl    = END_TSO_QUEUE;
    cap->sparks_created     = 0;
    cap->sparks_converted   = 0;
    cap->sparks_pruned      = 0;
#endif
    cap->f.stgEagerBlackholeInfo = (W_)&__stg_EAGER_BLACKHOLE_info;
    cap->f.stgGCEnter1           = (F_)__stg_gc_enter_1;
    cap->f.stgGCFun              = (F_)__stg_gc_fun;
    cap->mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                    RtsFlags.GcFlags.generations,
                                    "initCapability");
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }
    cap->free_tvar_watch_queues      = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks            = END_STM_CHUNK_LIST;
    cap->free_trec_headers           = NO_TREC;
    cap->transaction_tokens          = 0;
    cap->context_switch              = 0;
}
/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 * ------------------------------------------------------------------------- */
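/* Usage sketch (illustrative, not RTS code): a program linked with
 * -threaded and started as
 *
 *     ./prog +RTS -N4 -RTS
 *
 * sets RtsFlags.ParFlags.nNodes to 4, so initCapabilities() below
 * allocates a table of four Capabilities.
 */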
void
initCapabilities( void )
{
#if defined(THREADED_RTS)
    nat i;
#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
        errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
        RtsFlags.ParFlags.nNodes = 1;
    }
#endif
    n_capabilities = RtsFlags.ParFlags.nNodes;
    if (n_capabilities == 1) {
        capabilities = &MainCapability;
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
    } else {
        capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
                                      "initCapabilities");
    }
    for (i = 0; i < n_capabilities; i++) {
        initCapability(&capabilities[i], i);
    }
    debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);
#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif
    // There are no free capabilities to begin with. We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}
/* ----------------------------------------------------------------------------
 * setContextSwitches: cause all capabilities to context switch as
 * soon as possible.
 * ------------------------------------------------------------------------- */
void setContextSwitches(void)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        capabilities[i].context_switch = 1;
    }
}
/* ----------------------------------------------------------------------------
 * Give a Capability to a Task. The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 * ------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    trace(TRACE_sched | DEBUG_sched,
          "passing capability %d to %s %p",
          cap->no, task->tso ? "bound task" : "worker",
          (void *)task->id);
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because signalCondition() doesn't
    // flag the condition if the thread is already running, but we want
    // it to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
#endif
/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability. Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */
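/* Descriptive note (added for clarity): the three entry points below
 * differ only in locking and wakeup behaviour. releaseCapability()
 * takes cap->lock itself and calls releaseCapability_() with
 * always_wakeup = rtsFalse; releaseAndWakeupCapability() passes
 * rtsTrue instead; releaseCapabilityAndQueueWorker() additionally
 * pushes the calling worker Task onto cap->spare_workers first.
 */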
#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap,
                    rtsBool always_wakeup)
{
    Task *task;
    task = cap->running_task;

    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);
    cap->running_task = NULL;
    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call.
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }
    if (waiting_for_gc == PENDING_GC_SEQ) {
        last_free_capability = cap; // needed?
        trace(TRACE_sched | DEBUG_sched,
              "GC pending, set capability %d free", cap->no);
        return;
    }
    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        ASSERT(task != cap->run_queue_hd->bound);
        task = cap->run_queue_hd->bound;
        giveCapabilityToTask(cap,task);
        return;
    }
    if (!cap->spare_workers) {
        // Create a worker thread if we don't have one. If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed. If the system is
        // shutting down, we never create a new worker.
        if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
            debugTrace(DEBUG_sched,
                       "starting new worker on capability %d", cap->no);
            startWorkerTask(cap, workerStart);
            return;
        }
    }
    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (always_wakeup ||
        !emptyRunQueue(cap) || !emptyWakeupQueue(cap) ||
        !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue.
            return;
        }
    }
    last_free_capability = cap;
    trace(TRACE_sched | DEBUG_sched, "freeing capability %d", cap->no);
}
void
releaseCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsFalse);
    RELEASE_LOCK(&cap->lock);
}
void
releaseAndWakeupCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsTrue);
    RELEASE_LOCK(&cap->lock);
}
static void
releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;
    // If the current task is a worker, save it on the spare_workers
    // list of this Capability. A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_workers queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    // Also, be careful to check that this task hasn't just exited
    // Haskell to do a foreign call (task->suspended_tso).
    if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
        task->next = cap->spare_workers;
        cap->spare_workers = task;
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap,rtsFalse);

    RELEASE_LOCK(&cap->lock);
}
#endif
/* ----------------------------------------------------------------------------
 * waitForReturnCapability (Capability **pCap, Task *task)
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 * ------------------------------------------------------------------------- */
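/* Usage sketch (illustrative, not from this file): the resumeThread()
 * path does roughly
 *
 *     Capability *cap = task->cap;   // or NULL if unknown
 *     waitForReturnCapability(&cap, task);
 *     // ... re-enter Haskell and deliver the foreign-call result ...
 *
 * pCap is in/out: on entry *pCap may name the Capability the Task last
 * held (or NULL), and on exit it names the Capability now owned.
 */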
void
waitForReturnCapability (Capability **pCap, Task *task)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            for (i = 0; i < n_capabilities; i++) {
                cap = &capabilities[i];
                if (!cap->running_task) {
                    break;
                }
            }
            if (cap->running_task) {
                // Can't find a free one, use last_free_capability.
                cap = last_free_capability;
            }
        }
        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }
    ACQUIRE_LOCK(&cap->lock);

    debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);
    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);
            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }
    }
    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);

    *pCap = cap;
#endif
}
#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */
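/* Usage sketch (illustrative): the scheduler calls yieldCapability()
 * when it must temporarily give up its Capability, e.g.
 *
 *     yieldCapability(&cap, task);
 *
 * On return, *pCap is the Capability the Task now owns, which need not
 * be the one it gave up (the Task may have been migrated while asleep).
 */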
void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;
    if (waiting_for_gc == PENDING_GC_PAR) {
        debugTrace(DEBUG_sched, "capability %d: becoming a GC thread", cap->no);
        gcWorkerThread(cap);
        return;
    }
    debugTrace(DEBUG_sched, "giving up capability %d", cap->no);
    // We must now release the capability and wait to be woken up
    // again.
    task->wakeup = rtsFalse;
    releaseCapabilityAndQueueWorker(cap);
    for (;;) {
        ACQUIRE_LOCK(&task->lock);
        // task->lock held, cap->lock not held
        if (!task->wakeup) waitCondition(&task->cond, &task->lock);
        cap = task->cap;
        task->wakeup = rtsFalse;
        RELEASE_LOCK(&task->lock);
        debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task != NULL) {
            debugTrace(DEBUG_sched,
                       "capability %d is owned by another task", cap->no);
            RELEASE_LOCK(&cap->lock);
            continue;
        }
        if (task->tso == NULL) {
            ASSERT(cap->spare_workers != NULL);
            // if we're not at the front of the queue, release it
            // again. This is unlikely to happen.
            if (cap->spare_workers != task) {
                giveCapabilityToTask(cap,cap->spare_workers);
                RELEASE_LOCK(&cap->lock);
                continue;
            }
            cap->spare_workers = task->next;
            task->next = NULL;
        }
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
        break;
    }
    trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);
    ASSERT(cap->running_task == task);
    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    return;
}
/* ----------------------------------------------------------------------------
 * Wake up a thread on a Capability.
 *
 * This is used when the current Task is running on a Capability and
 * wishes to wake up a thread on a different Capability.
 * ------------------------------------------------------------------------- */
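/* Sketch of a hypothetical call site (not from this file): a thread on
 * my_cap that has just unblocked a TSO belonging to another Capability
 * would do roughly
 *
 *     wakeupThreadOnCapability(my_cap, tso->cap, tso);
 */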
void
wakeupThreadOnCapability (Capability *my_cap,
                          Capability *other_cap,
                          StgTSO *tso)
{
    ACQUIRE_LOCK(&other_cap->lock);
    // ASSUMES: other_cap->lock is held (acquired above)
    if (tso->bound) {
        ASSERT(tso->bound->cap == tso->cap);
        tso->bound->cap = other_cap;
    }
    tso->cap = other_cap;

    ASSERT(tso->bound ? tso->bound->cap == other_cap : 1);
    if (other_cap->running_task == NULL) {
        // nobody is running this Capability, we can add our thread
        // directly onto the run queue and start up a Task to run it.

        other_cap->running_task = myTask();
            // precond for releaseCapability_() and appendToRunQueue()
641 // precond for releaseCapability_() and appendToRunQueue()
643 appendToRunQueue(other_cap
,tso
);
645 trace(TRACE_sched
, "resuming capability %d", other_cap
->no
);
646 releaseCapability_(other_cap
,rtsFalse
);
        appendToWakeupQueue(my_cap,other_cap,tso);
        other_cap->context_switch = 1;
        // someone is running on this Capability, so it cannot be
        // freed without first checking the wakeup queue (see
        // releaseCapability_).
    }
    RELEASE_LOCK(&other_cap->lock);
}
/* ----------------------------------------------------------------------------
 * prodCapability
 *
 * If a Capability is currently idle, wake up a Task on it. Used to
 * get every Capability into the GC.
 * ------------------------------------------------------------------------- */
void
prodCapability (Capability *cap, Task *task)
{
    ACQUIRE_LOCK(&cap->lock);
    if (!cap->running_task) {
        cap->running_task = task;
        releaseCapability_(cap,rtsTrue);
    }
    RELEASE_LOCK(&cap->lock);
}
/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible. For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, thus any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller. Runnable threads are
 * killed.
 * ------------------------------------------------------------------------- */
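/* Usage sketch (hypothetical caller, not from this file): at RTS
 * shutdown, once sched_state == SCHED_SHUTTING_DOWN, something like
 *
 *     nat i;
 *     for (i = 0; i < n_capabilities; i++) {
 *         shutdownCapability(&capabilities[i], task, safe);
 *     }
 *
 * is performed so every Capability drains and stops.
 */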
void
shutdownCapability (Capability *cap, Task *task, rtsBool safe)
{
    nat i;

    task->cap = cap;
    // Loop indefinitely until all the workers have exited and there
    // are no Haskell threads left. We used to bail out after 50
    // iterations of this loop, but that occasionally left a worker
    // running which caused problems later (the closeMutex() below
    // isn't safe, for one thing).

    for (i = 0; /* i < 50 */; i++) {
        ASSERT(sched_state == SCHED_SHUTTING_DOWN);

        debugTrace(DEBUG_sched,
                   "shutting down capability %d, attempt %d", cap->no, i);
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            debugTrace(DEBUG_sched, "not owner, yielding");
            yieldThread();
            continue;
        }
        cap->running_task = task;
        if (cap->spare_workers) {
            // Look for workers that have died without removing
            // themselves from the list; this could happen if the OS
            // summarily killed the thread, for example. This
            // actually happens on Windows when the system is
            // terminating the program, and the RTS is running in a
            // DLL.
            Task *t, *prev;
            prev = NULL;
            for (t = cap->spare_workers; t != NULL; t = t->next) {
                if (!osThreadIsAlive(t->id)) {
                    debugTrace(DEBUG_sched,
                               "worker thread %p has died unexpectedly", (void *)t->id);
                    if (!prev) {
                        cap->spare_workers = t->next;
                    } else {
                        prev->next = t->next;
                    }
                } else {
                    prev = t;
                }
            }
        }
        if (!emptyRunQueue(cap) || cap->spare_workers) {
            debugTrace(DEBUG_sched,
                       "runnable threads or workers still alive, yielding");
            releaseCapability_(cap,rtsFalse); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }
        // If "safe", then busy-wait for any threads currently doing
        // foreign calls. If we're about to unload this DLL, for
        // example, we need to be sure that there are no OS threads
        // that will try to return to code that has been unloaded.
        // We can be a bit more relaxed when this is a standalone
        // program that is about to terminate, and let safe=false.
        if (cap->suspended_ccalling_tasks && safe) {
            debugTrace(DEBUG_sched,
                       "thread(s) are involved in foreign calls, yielding");
            cap->running_task = NULL;
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }
        debugTrace(DEBUG_sched, "capability %d is stopped.", cap->no);
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // We now have the Capability; its run queue and spare-workers
    // list are both empty.

    // ToDo: we can't drop this mutex, because there might still be
    // threads performing foreign calls that will eventually try to
    // return via resumeThread() and attempt to grab cap->lock.
    // closeMutex(&cap->lock);
}
/* ----------------------------------------------------------------------------
 * tryGrabCapability
 *
 * Attempt to gain control of a Capability if it is free.
 * ------------------------------------------------------------------------- */
rtsBool
tryGrabCapability (Capability *cap, Task *task)
{
    if (cap->running_task != NULL) return rtsFalse;
    ACQUIRE_LOCK(&cap->lock);
    if (cap->running_task != NULL) {
        RELEASE_LOCK(&cap->lock);
        return rtsFalse;
    }
    task->cap = cap;
    cap->running_task = task;
    RELEASE_LOCK(&cap->lock);
    return rtsTrue;
}
#endif /* THREADED_RTS */
static void
freeCapability (Capability *cap)
{
    stgFree(cap->mut_lists);
#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
    freeSparkPool(cap->sparks);
#endif
}
void
freeCapabilities (void)
{
#if defined(THREADED_RTS)
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        freeCapability(&capabilities[i]);
    }
#else
    freeCapability(&MainCapability);
#endif
}
/* ---------------------------------------------------------------------------
   Mark everything directly reachable from the Capabilities. When
   using multiple GC threads, each GC thread marks the Capabilities
   whose number is congruent to its own index modulo the number of GC
   threads (i.e. thread t of n marks every Capability c with
   c mod n == t).
   ------------------------------------------------------------------------ */
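/* Worked example (added for clarity): with delta = 3 GC threads, the
   thread starting at i0 = 1 visits Capabilities 1, 4, 7, ... via the
   loop "for (i = i0; i < n_capabilities; i += delta)" below. */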
void
markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
                      rtsBool prune_sparks USED_IF_THREADS)
{
    nat i;
    Capability *cap;
    Task *task;
    // Each GC thread is responsible for following roots from the
    // Capability of the same number. There will usually be the same
    // number of Capabilities as GC threads, or fewer; but just in case
    // there are more, we mark every Capability whose number is the GC
    // thread's index plus a multiple of the number of GC threads.
    for (i = i0; i < n_capabilities; i += delta) {
        cap = &capabilities[i];
        evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_tl);
#endif
        for (task = cap->suspended_ccalling_tasks; task != NULL;
             task = task->next) {
            debugTrace(DEBUG_sched,
                       "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
            evac(user, (StgClosure **)(void *)&task->suspended_tso);
        }
#if defined(THREADED_RTS)
        if (prune_sparks) {
            pruneSparkQueue (evac, user, cap);
        } else {
            traverseSparkQueue (evac, user, cap);
        }
#endif
    }
#if !defined(THREADED_RTS)
    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
    evac(user, (StgClosure **)(void *)&sleeping_queue);
#endif
}
void
markCapabilities (evac_fn evac, void *user)
{
    markSomeCapabilities(evac, user, 0, 1, rtsFalse);
}