/* ---------------------------------------------------------------------------
 * (c) The GHC Team, 2003-2006
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc.  During
 * STG execution, a pointer to the Capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities;
 * for non-threaded builds there is only one global capability, namely
 * MainCapability.
 * --------------------------------------------------------------------------*/
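/* A hedged sketch of the typical lifecycle of a Task using this module
 * (the Task setup itself is assumed and elided; the real entry points are
 * waitForReturnCapability(), yieldCapability() and releaseCapability()
 * defined below):
 *
 *     Capability *cap = NULL;
 *     waitForReturnCapability(&cap, task); // block until we own a Capability
 *     // ... execute STG code; BaseReg points into cap->r ...
 *     releaseCapability(cap);              // wake a returning/waiting worker
 */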
#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "STM.h"
#include "OSThreads.h"
#include "Capability.h"
#include "Schedule.h"
#include "Sparks.h"
#include "Trace.h"
// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
Capability MainCapability;
nat n_capabilities;
Capability *capabilities = NULL;
// Holds the Capability which last became free.  This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability;
/* GC indicator, in scope for the scheduler, init'ed to false */
volatile StgWord waiting_for_gc = 0;
#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return blackholes_need_checking
        || sched_state >= SCHED_INTERRUPTING;
}
#endif
#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
anyWorkForMe( Capability *cap, Task *task )
{
    if (task->tso != NULL) {
        // A bound task only runs if its thread is on the run queue of
        // the capability on which it was woken up.  Otherwise, we
        // can't be sure that we have the right capability: the thread
        // might be woken up on some other capability, and task->cap
        // could change under our feet.
        return !emptyRunQueue(cap) && cap->run_queue_hd->bound == task;
    } else {
        // A vanilla worker task runs if either there is a lightweight
        // thread at the head of the run queue, or the run queue is
        // empty and (there are sparks to execute, or there is some
        // other global condition to check, such as threads blocked on
        // blackholes).
        if (emptyRunQueue(cap)) {
            return !emptySparkPoolCap(cap)
                || !emptyWakeupQueue(cap)
                || globalWorkToDo();
        } else {
            return cap->run_queue_hd->bound == NULL;
        }
    }
}
#endif
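/* Worked example (added for illustration): if a bound task's TSO has been
 * migrated and currently sits at the head of some *other* Capability's run
 * queue, anyWorkForMe() on this cap returns rtsFalse, so the task keeps
 * waiting; it will be woken on the Capability that actually holds its TSO. */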
/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock
 * -------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->return_link == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->return_link == NULL);
        cap->returning_tasks_tl->return_link = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}
STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    Task *task;
    ASSERT_LOCK_HELD(&cap->lock);
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->return_link;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->return_link = NULL;
    return task;
}
#endif
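/* Note (added summary): returning_tasks is a singly-linked FIFO threaded
 * through task->return_link, with head/tail pointers on the Capability.
 * Pushing at the tail and popping at the head, always under cap->lock,
 * gives returning in-calls FIFO service without any global lock. */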
/* ----------------------------------------------------------------------------
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */
static void
initCapability( Capability *cap, nat i )
{
    nat g;

    cap->no = i;
    cap->in_haskell        = rtsFalse;

    cap->run_queue_hd      = END_TSO_QUEUE;
    cap->run_queue_tl      = END_TSO_QUEUE;
#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task      = NULL; // indicates cap is free
    cap->spare_workers     = NULL;
    cap->suspended_ccalling_tasks = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->wakeup_queue_hd    = END_TSO_QUEUE;
    cap->wakeup_queue_tl    = END_TSO_QUEUE;
#endif
    cap->f.stgGCEnter1     = (F_)__stg_gc_enter_1;
    cap->f.stgGCFun        = (F_)__stg_gc_fun;
    cap->mut_lists  = stgMallocBytes(sizeof(bdescr *) *
                                     RtsFlags.GcFlags.generations,
                                     "initCapability");

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }
    cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks = END_STM_CHUNK_LIST;
    cap->free_trec_headers = NO_TREC;
    cap->transaction_tokens = 0;
    cap->context_switch = 0;
}
/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling.  For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
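/* For example (illustrative, assuming a program linked with -threaded):
 * running it with "+RTS -N4 -RTS" sets RtsFlags.ParFlags.nNodes to 4, so
 * initCapabilities() below allocates and initialises capabilities[0]
 * through capabilities[3]. */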
void
initCapabilities( void )
{
#if defined(THREADED_RTS)
    nat i;

#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
        errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
        RtsFlags.ParFlags.nNodes = 1;
    }
#endif

    n_capabilities = RtsFlags.ParFlags.nNodes;
    if (n_capabilities == 1) {
        capabilities = &MainCapability;
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
    } else {
        capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
                                      "initCapabilities");
    }

    for (i = 0; i < n_capabilities; i++) {
        initCapability(&capabilities[i], i);
    }

    debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);
#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif

    // There are no free capabilities to begin with.  We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}
/* ----------------------------------------------------------------------------
 * setContextSwitches: cause all capabilities to context switch as
 * soon as possible.
 * ------------------------------------------------------------------------- */
void setContextSwitches(void)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        capabilities[i].context_switch = 1;
    }
}
/* ----------------------------------------------------------------------------
 * Give a Capability to a Task.  The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    trace(TRACE_sched | DEBUG_sched,
          "passing capability %d to %s %p",
          cap->no, task->tso ? "bound task" : "worker",
          (void *)task->id);
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because signalCondition() doesn't
    // flag the condition if the thread is already running, but we want
    // it to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
#endif
/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability.  Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */
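/* Hand-off priority implemented by releaseCapability_() below (a summary
 * added here, derived from the code): (1) a worker returning from a
 * foreign call, (2) a pending GC, (3) a bound thread at the head of the
 * run queue, (4) a spare worker if there is work to do; otherwise the
 * Capability is simply marked free. */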
#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap)
{
    Task *task;

    task = cap->running_task;

    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call..
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }
    /* If waiting_for_gc was the reason to release the cap: the thread
       comes from yieldCap->releaseAndQueueWorker.  Unconditionally set
       the cap free and return (see the default case after the other
       if-protected special cases).  The thread will wait on the
       condition variable and re-acquire the same cap after GC (the
       GC-triggering cap calls releaseCap and enters the spare_workers
       case). */
    if (waiting_for_gc) {
        last_free_capability = cap; // needed?
        trace(TRACE_sched | DEBUG_sched,
              "GC pending, set capability %d free", cap->no);
        return;
    }
    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        ASSERT(task != cap->run_queue_hd->bound);
        task = cap->run_queue_hd->bound;
        giveCapabilityToTask(cap,task);
        return;
    }
    if (!cap->spare_workers) {
        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
            debugTrace(DEBUG_sched,
                       "starting new worker on capability %d", cap->no);
            startWorkerTask(cap, workerStart);
            return;
        }
    }
    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (!emptyRunQueue(cap) || !emptyWakeupQueue(cap)
        || !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue;
            return;
        }
    }

    last_free_capability = cap;
    trace(TRACE_sched | DEBUG_sched, "freeing capability %d", cap->no);
}
void
releaseCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap);
    RELEASE_LOCK(&cap->lock);
}
static void
releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability.  A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_worker queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    // Also, be careful to check that this task hasn't just exited
    // Haskell to do a foreign call (task->suspended_tso).
    if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
        task->next = cap->spare_workers;
        cap->spare_workers = task;
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap);

    RELEASE_LOCK(&cap->lock);
}
#endif /* THREADED_RTS */
/* ----------------------------------------------------------------------------
 * waitForReturnCapability( Task *task )
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
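/* A hedged sketch of the caller's side (resumeThread() in Schedule.c is
 * the real caller; the surrounding Task bookkeeping is assumed here):
 *
 *     Capability *cap = task->cap;    // may be NULL for a fresh in-call
 *     waitForReturnCapability(&cap, task);
 *     // we now own cap and can deliver the foreign-call result
 */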
void
waitForReturnCapability (Capability **pCap, Task *task)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            for (i = 0; i < n_capabilities; i++) {
                cap = &capabilities[i];
                if (!cap->running_task) {
                    break;
                }
            }
            // Can't find a free one, use last_free_capability.
            cap = last_free_capability;
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }
    ACQUIRE_LOCK(&cap->lock);

    debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);

    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }
    }

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);

    *pCap = cap;
#endif
}
#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */

void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;
    // The fast path has no locking; we only take locks if we enter the
    // while loop below.

    while ( waiting_for_gc
            /* i.e. another capability triggered HeapOverflow and is busy
               getting capabilities (stopping their owning tasks) */
            || cap->returning_tasks_hd != NULL
                /* cap reserved for another task */
            || !anyWorkForMe(cap,task)
                /* cap/task have no work */
          ) {

        debugTrace(DEBUG_sched, "giving up capability %d", cap->no);

        // We must now release the capability and wait to be woken up
        // again.
        task->wakeup = rtsFalse;
        releaseCapabilityAndQueueWorker(cap);
        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);

            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task != NULL) {
                debugTrace(DEBUG_sched,
                           "capability %d is owned by another task", cap->no);
                RELEASE_LOCK(&cap->lock);
                continue;
            }
            if (task->tso == NULL) {
                ASSERT(cap->spare_workers != NULL);
                // if we're not at the front of the queue, release it
                // again.  This is unlikely to happen.
                if (cap->spare_workers != task) {
                    giveCapabilityToTask(cap,cap->spare_workers);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->spare_workers = task->next;
                task->next = NULL;
            }

            cap->running_task = task;
            RELEASE_LOCK(&cap->lock);
            break;
        }

        trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);
        ASSERT(cap->running_task == task);
    }

    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    return;
}
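/* Note (added summary): a yielding Task sleeps on its own task->cond and,
 * on wakeup, re-checks ownership under cap->lock; wakeups can race, so a
 * woken Task may find the Capability already taken (or itself not at the
 * front of spare_workers) and must hand the Capability on and sleep again. */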
/* ----------------------------------------------------------------------------
 * Wake up a thread on a Capability.
 *
 * This is used when the current Task is running on a Capability and
 * wishes to wake up a thread on a different Capability.
 * ------------------------------------------------------------------------- */
void
wakeupThreadOnCapability (Capability *my_cap,
                          Capability *other_cap,
                          StgTSO *tso)
{
    ACQUIRE_LOCK(&other_cap->lock);

    // ASSUMES: cap->lock is held (asserted in wakeupThreadOnCapability)
    if (tso->bound) {
        ASSERT(tso->bound->cap == tso->cap);
        tso->bound->cap = other_cap;
    }
    tso->cap = other_cap;

    ASSERT(tso->bound ? tso->bound->cap == other_cap : 1);
    if (other_cap->running_task == NULL) {
        // nobody is running this Capability, we can add our thread
        // directly onto the run queue and start up a Task to run it.

        other_cap->running_task = myTask();
            // precond for releaseCapability_() and appendToRunQueue()

        appendToRunQueue(other_cap,tso);

        trace(TRACE_sched, "resuming capability %d", other_cap->no);
        releaseCapability_(other_cap);
    } else {
        appendToWakeupQueue(my_cap,other_cap,tso);
        other_cap->context_switch = 1;
        // someone is running on this Capability, so it cannot be
        // freed without first checking the wakeup queue (see
        // releaseCapability_).
    }

    RELEASE_LOCK(&other_cap->lock);
}
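/* Note (added summary): there are two delivery paths above.  If other_cap
 * is idle we claim it, enqueue the TSO directly and release the Capability
 * so a Task will run it; if other_cap is busy we append the TSO to its
 * wakeup queue and request a context switch, and the owning Task picks the
 * TSO up the next time it checks that queue (see releaseCapability_()). */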
/* ----------------------------------------------------------------------------
 * prodCapabilities
 *
 * Used to indicate that the interrupted flag is now set, or some
 * other global condition that might require waking up a Task on each
 * Capability.
 * ------------------------------------------------------------------------- */
static void
prodCapabilities(rtsBool all)
{
    nat i;
    Capability *cap;
    Task *task;

    for (i = 0; i < n_capabilities; i++) {
        cap = &capabilities[i];
        ACQUIRE_LOCK(&cap->lock);
        if (!cap->running_task) {
            if (cap->spare_workers) {
                trace(TRACE_sched, "resuming capability %d", cap->no);
                task = cap->spare_workers;
                ASSERT(!task->stopped);
                giveCapabilityToTask(cap,task);
                if (!all) {
                    RELEASE_LOCK(&cap->lock);
                    return;
                }
            }
        }
        RELEASE_LOCK(&cap->lock);
    }
    return;
}

void
prodAllCapabilities (void)
{
    prodCapabilities(rtsTrue);
}
/* ----------------------------------------------------------------------------
 * prodOneCapability
 *
 * Like prodAllCapabilities, but we only require a single Task to wake
 * up in order to service some global event, such as checking for
 * deadlock after some idle time has passed.
 * ------------------------------------------------------------------------- */

void
prodOneCapability (void)
{
    prodCapabilities(rtsFalse);
}
/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible.  For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, thus any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller.  Runnable threads are
 * killed.
 *
 * ------------------------------------------------------------------------- */
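/* A sketch of the shutdown loop below (summary added, not original text):
 * repeatedly try to become the owner of the Capability; while the run
 * queue or the spare_workers list is non-empty, release the Capability
 * again so workers can drain it; if safe == rtsTrue, also busy-wait until
 * no Tasks remain suspended in foreign calls; only then is the Capability
 * stopped and freed. */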
void
shutdownCapability (Capability *cap, Task *task, rtsBool safe)
{
    nat i;

    ASSERT(sched_state == SCHED_SHUTTING_DOWN);

    task->cap = cap;

    // Loop indefinitely until all the workers have exited and there
    // are no Haskell threads left.  We used to bail out after 50
    // iterations of this loop, but that occasionally left a worker
    // running which caused problems later (the closeMutex() below
    // isn't safe, for one thing).
    for (i = 0; /* i < 50 */; i++) {
        debugTrace(DEBUG_sched,
                   "shutting down capability %d, attempt %d", cap->no, i);
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            debugTrace(DEBUG_sched, "not owner, yielding");
            yieldThread();
            continue;
        }
        cap->running_task = task;
        if (cap->spare_workers) {
            // Look for workers that have died without removing
            // themselves from the list; this could happen if the OS
            // summarily killed the thread, for example.  This
            // actually happens on Windows when the system is
            // terminating the program, and the RTS is running in a
            // DLL.
            Task *t, *prev;
            prev = NULL;
            for (t = cap->spare_workers; t != NULL; t = t->next) {
                if (!osThreadIsAlive(t->id)) {
                    debugTrace(DEBUG_sched,
                               "worker thread %p has died unexpectedly", (void *)t->id);
                    if (!prev) {
                        cap->spare_workers = t->next;
                    } else {
                        prev->next = t->next;
                    }
                    prev = t;
                }
            }
        }
        if (!emptyRunQueue(cap) || cap->spare_workers) {
            debugTrace(DEBUG_sched,
                       "runnable threads or workers still alive, yielding");
            releaseCapability_(cap); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }
        // If "safe", then busy-wait for any threads currently doing
        // foreign calls.  If we're about to unload this DLL, for
        // example, we need to be sure that there are no OS threads
        // that will try to return to code that has been unloaded.
        // We can be a bit more relaxed when this is a standalone
        // program that is about to terminate, and let safe=false.
        if (cap->suspended_ccalling_tasks && safe) {
            debugTrace(DEBUG_sched,
                       "thread(s) are involved in foreign calls, yielding");
            cap->running_task = NULL;
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        debugTrace(DEBUG_sched, "capability %d is stopped.", cap->no);
        freeCapability(cap);
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // We now have the Capability; its run queue and spare workers
    // list are both empty.

    // ToDo: we can't drop this mutex, because there might still be
    // threads performing foreign calls that will eventually try to
    // return via resumeThread() and attempt to grab cap->lock.
    // closeMutex(&cap->lock);
}
/* ----------------------------------------------------------------------------
 * tryGrabCapability
 *
 * Attempt to gain control of a Capability if it is free.
 *
 * ------------------------------------------------------------------------- */
rtsBool
tryGrabCapability (Capability *cap, Task *task)
{
    if (cap->running_task != NULL) return rtsFalse;
    ACQUIRE_LOCK(&cap->lock);
    if (cap->running_task != NULL) {
        RELEASE_LOCK(&cap->lock);
        return rtsFalse;
    }
    task->cap = cap;
    cap->running_task = task;
    RELEASE_LOCK(&cap->lock);
    return rtsTrue;
}

#endif /* THREADED_RTS */
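/* Note (added explanation): the unlocked test of cap->running_task in
 * tryGrabCapability() is just a cheap first filter; it can race with other
 * Tasks, so the test is repeated under cap->lock before the Capability is
 * actually claimed (test, then lock, then test again). */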
void
freeCapability (Capability *cap)
{
    stgFree(cap->mut_lists);
#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
    freeSparkPool(&cap->r.rSparks);
#endif
}
/* ---------------------------------------------------------------------------
   Mark everything directly reachable from the Capabilities.  When
   using multiple GC threads, each GC thread marks the Capabilities
   whose number is congruent to its index modulo the number of GC
   threads (see the loop in markSomeCapabilities below).
   ------------------------------------------------------------------------ */
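/* Worked example (added for illustration): with delta == 2 GC threads and
 * n_capabilities == 5, the thread started with i0 == 0 marks capabilities
 * 0, 2 and 4, while the thread with i0 == 1 marks capabilities 1 and 3. */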
void
markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta)
{
    nat i;
    Capability *cap;
    Task *task;

    // Each GC thread is responsible for following roots from the
    // Capability of the same number.  There will usually be the same
    // or fewer Capabilities than GC threads, but just in case there
    // are more, we mark every Capability whose number is the GC
    // thread's index plus a multiple of the number of GC threads.
    for (i = i0; i < n_capabilities; i += delta) {
        cap = &capabilities[i];
        evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_tl);
#endif
        for (task = cap->suspended_ccalling_tasks; task != NULL;
             task = task->next) {
            debugTrace(DEBUG_sched,
                       "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
            evac(user, (StgClosure **)(void *)&task->suspended_tso);
        }

#if defined(THREADED_RTS)
        traverseSparkQueue (evac, user, cap);
#endif
    }
#if !defined(THREADED_RTS)
    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
    evac(user, (StgClosure **)(void *)&sleeping_queue);
#endif
}
// This function is used by the compacting GC to thread all the
// pointers from spark queues.
void
traverseSparkQueues (evac_fn evac USED_IF_THREADS, void *user USED_IF_THREADS)
{
#if defined(THREADED_RTS)
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        traverseSparkQueue (evac, user, &capabilities[i]);
    }
#endif // THREADED_RTS
}
void
markCapabilities (evac_fn evac, void *user)
{
    markSomeCapabilities(evac, user, 0, 1);
}