Use mutator threads to do GC, instead of having a separate pool of GC threads
rts/Capability.c
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2003-2006
 *
 * Capabilities
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc.  During
 * STG execution, a pointer to the Capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities;
 * for non-threaded builds there is only one global capability, namely
 * MainCapability.
 *
 * --------------------------------------------------------------------------*/
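
/* A typical Task life-cycle against this API, as a sketch only (the
 * real call sites are in Schedule.c; error handling omitted):
 *
 *     Capability *cap = NULL;
 *     waitForReturnCapability(&cap, task); // block until we own a Capability
 *     // ... run Haskell code; cap->running_task == task ...
 *     releaseCapability(cap);              // wake a returning/waiting worker
 */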

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "STM.h"
#include "OSThreads.h"
#include "Capability.h"
#include "Schedule.h"
#include "Sparks.h"
#include "Trace.h"
#include "GC.h"

// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
Capability MainCapability;

nat n_capabilities;
Capability *capabilities = NULL;

// Holds the Capability which last became free.  This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability;

/* GC indicator, in scope for the scheduler: set to PENDING_GC_SEQ or
 * PENDING_GC_PAR when a GC is pending, 0 otherwise. */
volatile StgWord waiting_for_gc = 0;

#if defined(THREADED_RTS)
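// Is there RTS-global work that an idle Capability should wake up for?
// True when there are blackholes to check, or when the scheduler is
// interrupting or shutting down.  Used by releaseCapability_() below.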
STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return blackholes_need_checking
        || sched_state >= SCHED_INTERRUPTING;
}
#endif

#if defined(THREADED_RTS)
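// Find a spark for the Capability to evaluate: first look in our own
// pool, then try to steal from the other Capabilities' pools.  Returns
// NULL if the run queue is non-empty (threads take priority over
// speculative sparks) or if no spark could be obtained.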
StgClosure *
findSpark (Capability *cap)
{
    Capability *robbed;
    StgClosurePtr spark;
    rtsBool retry;
    nat i = 0;

    if (!emptyRunQueue(cap)) {
        // If there are other threads, don't try to run any new
        // sparks: sparks might be speculative, and we don't want to take
        // resources away from the main computation.
        return 0;
    }

    // first try to get a spark from our own pool.
    // We should be using reclaimSpark(), because it works without
    // needing any atomic instructions:
    //   spark = reclaimSpark(cap->sparks);
    // However, measurements show that this makes at least one benchmark
    // slower (prsa) and doesn't affect the others.
    spark = tryStealSpark(cap);
    if (spark != NULL) {
        cap->sparks_converted++;
        return spark;
    }

    if (n_capabilities == 1) { return NULL; } // makes no sense...

    debugTrace(DEBUG_sched,
               "cap %d: Trying to steal work from other capabilities",
               cap->no);

    do {
        retry = rtsFalse;

        /* visit capabilities 0..n-1 in sequence until a theft succeeds.
           We could start at a random place instead of 0. */
        for ( i=0 ; i < n_capabilities ; i++ ) {
            robbed = &capabilities[i];
            if (cap == robbed)  // ourselves...
                continue;

            if (emptySparkPoolCap(robbed)) // nothing to steal here
                continue;

            spark = tryStealSpark(robbed);
            if (spark == NULL && !emptySparkPoolCap(robbed)) {
                // we conflicted with another thread while trying to steal;
                // try again later.
                retry = rtsTrue;
            }

            if (spark != NULL) {
                debugTrace(DEBUG_sched,
                           "cap %d: Stole a spark from capability %d",
                           cap->no, robbed->no);
                cap->sparks_converted++;
                return spark;
            }
            // otherwise: no success, try next one
        }
    } while (retry);

    debugTrace(DEBUG_sched, "No sparks stolen");
    return NULL;
}

// Returns rtsTrue if any spark pool is non-empty at this moment in time.
// The result is only valid for an instant, of course, so it should not
// be relied upon for correctness.
rtsBool
anySparks (void)
{
    nat i;

    for (i=0; i < n_capabilities; i++) {
        if (!emptySparkPoolCap(&capabilities[i])) {
            return rtsTrue;
        }
    }
    return rtsFalse;
}
#endif

/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock
 * -------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->return_link == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->return_link == NULL);
        cap->returning_tasks_tl->return_link = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}

STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    Task *task;
    ASSERT_LOCK_HELD(&cap->lock);
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->return_link;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->return_link = NULL;
    return task;
}
#endif

/* ----------------------------------------------------------------------------
 * Initialisation
 *
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */

static void
initCapability( Capability *cap, nat i )
{
    nat g;

    cap->no = i;
    cap->in_haskell = rtsFalse;
    cap->in_gc = rtsFalse;

    cap->run_queue_hd = END_TSO_QUEUE;
    cap->run_queue_tl = END_TSO_QUEUE;

#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task = NULL; // indicates cap is free
    cap->spare_workers = NULL;
    cap->suspended_ccalling_tasks = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->wakeup_queue_hd = END_TSO_QUEUE;
    cap->wakeup_queue_tl = END_TSO_QUEUE;
    cap->sparks_created = 0;
    cap->sparks_converted = 0;
    cap->sparks_pruned = 0;
#endif

    cap->f.stgEagerBlackholeInfo = (W_)&__stg_EAGER_BLACKHOLE_info;
    cap->f.stgGCEnter1 = (F_)__stg_gc_enter_1;
    cap->f.stgGCFun = (F_)__stg_gc_fun;

    cap->mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                    RtsFlags.GcFlags.generations,
                                    "initCapability");

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }

    cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks = END_STM_CHUNK_LIST;
    cap->free_trec_headers = NO_TREC;
    cap->transaction_tokens = 0;
    cap->context_switch = 0;
}

/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling.  For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
void
initCapabilities( void )
{
#if defined(THREADED_RTS)
    nat i;

#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
        errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
        RtsFlags.ParFlags.nNodes = 1;
    }
#endif

    n_capabilities = RtsFlags.ParFlags.nNodes;

    if (n_capabilities == 1) {
        capabilities = &MainCapability;
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
    } else {
        capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
                                      "initCapabilities");
    }

    for (i = 0; i < n_capabilities; i++) {
        initCapability(&capabilities[i], i);
    }

    debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);

#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif

    // There are no free capabilities to begin with.  We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}

/* ----------------------------------------------------------------------------
 * setContextSwitches: cause all capabilities to context switch as
 * soon as possible.
 * ------------------------------------------------------------------------- */

void setContextSwitches(void)
{
    nat i;
    for (i=0; i < n_capabilities; i++) {
        capabilities[i].context_switch = 1;
    }
}

/* ----------------------------------------------------------------------------
 * Give a Capability to a Task.  The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    trace(TRACE_sched | DEBUG_sched,
          "passing capability %d to %s %p",
          cap->no, task->tso ? "bound task" : "worker",
          (void *)task->id);
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because signalCondition() doesn't
    // flag the condition if the thread is already running, but we want
    // it to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Let go of a capability.  Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap,
                    rtsBool always_wakeup)
{
    Task *task;

    task = cap->running_task;

    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call.
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }

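    // If a sequential GC is pending, don't wake anyone up: leave this
    // Capability free so that the Task that is going to perform the
    // GC can acquire it.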
    if (waiting_for_gc == PENDING_GC_SEQ) {
        last_free_capability = cap; // needed?
        trace(TRACE_sched | DEBUG_sched,
              "GC pending, set capability %d free", cap->no);
        return;
    }

    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        ASSERT(task != cap->run_queue_hd->bound);
        task = cap->run_queue_hd->bound;
        giveCapabilityToTask(cap,task);
        return;
    }

    if (!cap->spare_workers) {
        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
            debugTrace(DEBUG_sched,
                       "starting new worker on capability %d", cap->no);
            startWorkerTask(cap, workerStart);
            return;
        }
    }

    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (always_wakeup ||
        !emptyRunQueue(cap) || !emptyWakeupQueue(cap) ||
        !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue.
            return;
        }
    }

    last_free_capability = cap;
    trace(TRACE_sched | DEBUG_sched, "freeing capability %d", cap->no);
}

void
releaseCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsFalse);
    RELEASE_LOCK(&cap->lock);
}

void
releaseAndWakeupCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsTrue);
    RELEASE_LOCK(&cap->lock);
}
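
// Release a Capability; if the current Task is an ordinary worker, also
// save it on the Capability's spare_workers list so that it can be
// woken up again later (bound tasks stay attached to their TSOs).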
static void
releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability.  A worker can mark itself as stopped,
    // in which case it is not placed on the spare_workers queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    // Also, be careful to check that this task hasn't just exited
    // Haskell to do a foreign call (task->suspended_tso).
    if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
        task->next = cap->spare_workers;
        cap->spare_workers = task;
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap,rtsFalse);

    RELEASE_LOCK(&cap->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * waitForReturnCapability( Task *task )
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
void
waitForReturnCapability (Capability **pCap, Task *task)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            cap = NULL;
            for (i = 0; i < n_capabilities; i++) {
                if (!capabilities[i].running_task) {
                    cap = &capabilities[i];
                    break;
                }
            }
            if (cap == NULL) {
                // Can't find a free one, use last_free_capability.
                cap = last_free_capability;
            }
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }

    ACQUIRE_LOCK(&cap->lock);

    debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);

    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }

    }

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);

    *pCap = cap;
#endif
}

#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */

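// Give up our Capability so another Task can run, and wait until we
// are handed a Capability again.  Exception: if a parallel GC is
// pending (waiting_for_gc == PENDING_GC_PAR), this mutator Task turns
// itself into a GC worker via gcWorkerThread() instead -- this is how
// mutator threads are used to do the GC rather than a separate pool
// of GC threads.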
void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

    if (waiting_for_gc == PENDING_GC_PAR) {
        debugTrace(DEBUG_sched, "capability %d: becoming a GC thread", cap->no);
        gcWorkerThread(cap);
        return;
    }

    debugTrace(DEBUG_sched, "giving up capability %d", cap->no);

    // We must now release the capability and wait to be woken up
    // again.
    task->wakeup = rtsFalse;
    releaseCapabilityAndQueueWorker(cap);

    for (;;) {
        ACQUIRE_LOCK(&task->lock);
        // task->lock held, cap->lock not held
        if (!task->wakeup) waitCondition(&task->cond, &task->lock);
        cap = task->cap;
        task->wakeup = rtsFalse;
        RELEASE_LOCK(&task->lock);

        debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);

        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task != NULL) {
            debugTrace(DEBUG_sched,
                       "capability %d is owned by another task", cap->no);
            RELEASE_LOCK(&cap->lock);
            continue;
        }

        if (task->tso == NULL) {
            ASSERT(cap->spare_workers != NULL);
            // if we're not at the front of the queue, release it
            // again.  This is unlikely to happen.
            if (cap->spare_workers != task) {
                giveCapabilityToTask(cap,cap->spare_workers);
                RELEASE_LOCK(&cap->lock);
                continue;
            }
            cap->spare_workers = task->next;
            task->next = NULL;
        }
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
        break;
    }

    trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);
    ASSERT(cap->running_task == task);

    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    return;
}

/* ----------------------------------------------------------------------------
 * Wake up a thread on a Capability.
 *
 * This is used when the current Task is running on a Capability and
 * wishes to wake up a thread on a different Capability.
 * ------------------------------------------------------------------------- */

void
wakeupThreadOnCapability (Capability *my_cap,
                          Capability *other_cap,
                          StgTSO *tso)
{
    ACQUIRE_LOCK(&other_cap->lock);

    // We hold other_cap->lock here, so it is safe to modify tso->cap.
    if (tso->bound) {
        ASSERT(tso->bound->cap == tso->cap);
        tso->bound->cap = other_cap;
    }
    tso->cap = other_cap;

    ASSERT(tso->bound ? tso->bound->cap == other_cap : 1);

    if (other_cap->running_task == NULL) {
        // nobody is running this Capability, we can add our thread
        // directly onto the run queue and start up a Task to run it.

        other_cap->running_task = myTask();
        // precond for releaseCapability_() and appendToRunQueue()

        appendToRunQueue(other_cap,tso);

        trace(TRACE_sched, "resuming capability %d", other_cap->no);
        releaseCapability_(other_cap,rtsFalse);
    } else {
        appendToWakeupQueue(my_cap,other_cap,tso);
        other_cap->context_switch = 1;
        // someone is running on this Capability, so it cannot be
        // freed without first checking the wakeup queue (see
        // releaseCapability_).
    }

    RELEASE_LOCK(&other_cap->lock);
}

/* ----------------------------------------------------------------------------
 * prodCapability
 *
 * If a Capability is currently idle, wake up a Task on it.  Used to
 * get every Capability into the GC.
 * ------------------------------------------------------------------------- */

void
prodCapability (Capability *cap, Task *task)
{
    ACQUIRE_LOCK(&cap->lock);
    if (!cap->running_task) {
        cap->running_task = task;
        releaseCapability_(cap,rtsTrue);
    }
    RELEASE_LOCK(&cap->lock);
}

/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible.  For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, thus any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller.  Runnable threads are
 * killed.
 *
 * ------------------------------------------------------------------------- */

void
shutdownCapability (Capability *cap, Task *task, rtsBool safe)
{
    nat i;

    task->cap = cap;

    // Loop indefinitely until all the workers have exited and there
    // are no Haskell threads left.  We used to bail out after 50
    // iterations of this loop, but that occasionally left a worker
    // running which caused problems later (the closeMutex() below
    // isn't safe, for one thing).

    for (i = 0; /* i < 50 */; i++) {
        ASSERT(sched_state == SCHED_SHUTTING_DOWN);

        debugTrace(DEBUG_sched,
                   "shutting down capability %d, attempt %d", cap->no, i);
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            debugTrace(DEBUG_sched, "not owner, yielding");
            yieldThread();
            continue;
        }
        cap->running_task = task;

        if (cap->spare_workers) {
            // Look for workers that have died without removing
            // themselves from the list; this could happen if the OS
            // summarily killed the thread, for example.  This
            // actually happens on Windows when the system is
            // terminating the program, and the RTS is running in a
            // DLL.
            Task *t, *prev;
            prev = NULL;
            for (t = cap->spare_workers; t != NULL; t = t->next) {
                if (!osThreadIsAlive(t->id)) {
                    debugTrace(DEBUG_sched,
                               "worker thread %p has died unexpectedly", (void *)t->id);
                    // unlink the dead worker; prev only advances past
                    // live workers, so consecutive dead workers are
                    // unlinked correctly.
                    if (!prev) {
                        cap->spare_workers = t->next;
                    } else {
                        prev->next = t->next;
                    }
                } else {
                    prev = t;
                }
            }
        }

        if (!emptyRunQueue(cap) || cap->spare_workers) {
            debugTrace(DEBUG_sched,
                       "runnable threads or workers still alive, yielding");
            releaseCapability_(cap,rtsFalse); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        // If "safe", then busy-wait for any threads currently doing
        // foreign calls.  If we're about to unload this DLL, for
        // example, we need to be sure that there are no OS threads
        // that will try to return to code that has been unloaded.
        // We can be a bit more relaxed when this is a standalone
        // program that is about to terminate, and let safe=false.
        if (cap->suspended_ccalling_tasks && safe) {
            debugTrace(DEBUG_sched,
                       "thread(s) are involved in foreign calls, yielding");
            cap->running_task = NULL;
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        debugTrace(DEBUG_sched, "capability %d is stopped.", cap->no);
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // We now have the Capability; its run queue and spare workers
    // list are both empty.

    // ToDo: we can't drop this mutex, because there might still be
    // threads performing foreign calls that will eventually try to
    // return via resumeThread() and attempt to grab cap->lock.
    // closeMutex(&cap->lock);
}

/* ----------------------------------------------------------------------------
 * tryGrabCapability
 *
 * Attempt to gain control of a Capability if it is free.
 *
 * ------------------------------------------------------------------------- */

rtsBool
tryGrabCapability (Capability *cap, Task *task)
{
    if (cap->running_task != NULL) return rtsFalse;
    ACQUIRE_LOCK(&cap->lock);
    if (cap->running_task != NULL) {
        RELEASE_LOCK(&cap->lock);
        return rtsFalse;
    }
    task->cap = cap;
    cap->running_task = task;
    RELEASE_LOCK(&cap->lock);
    return rtsTrue;
}

#endif /* THREADED_RTS */
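
// Free the memory owned by a single Capability: its mutable lists,
// and (when sparks are compiled in) its spark pool.  The Capability
// structure itself is not freed here, since capabilities may point at
// the statically allocated MainCapability.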
static void
freeCapability (Capability *cap)
{
    stgFree(cap->mut_lists);
#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
    freeSparkPool(cap->sparks);
#endif
}

void
freeCapabilities (void)
{
#if defined(THREADED_RTS)
    nat i;
    for (i=0; i < n_capabilities; i++) {
        freeCapability(&capabilities[i]);
    }
#else
    freeCapability(&MainCapability);
#endif
}

/* ---------------------------------------------------------------------------
   Mark everything directly reachable from the Capabilities.  When
   using multiple GC threads, GC thread i marks every Capability c
   for which (c mod n == i), where n is the number of GC threads.
   ------------------------------------------------------------------------ */

void
markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
                      rtsBool prune_sparks USED_IF_THREADS)
{
    nat i;
    Capability *cap;
    Task *task;

    // Each GC thread is responsible for following roots from the
    // Capability of the same number.  There will usually be at least
    // as many GC threads as Capabilities, but just in case there are
    // more Capabilities, each GC thread marks every Capability whose
    // number is the thread's index plus a multiple of the number of
    // GC threads.
    for (i = i0; i < n_capabilities; i += delta) {
        cap = &capabilities[i];
        evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_tl);
#endif
        for (task = cap->suspended_ccalling_tasks; task != NULL;
             task=task->next) {
            debugTrace(DEBUG_sched,
                       "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
            evac(user, (StgClosure **)(void *)&task->suspended_tso);
        }

#if defined(THREADED_RTS)
        if (prune_sparks) {
            pruneSparkQueue (evac, user, cap);
        } else {
            traverseSparkQueue (evac, user, cap);
        }
#endif
    }

#if !defined(THREADED_RTS)
    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
    evac(user, (StgClosure **)(void *)&sleeping_queue);
#endif
}
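
// Mark every Capability from a single thread: equivalent to calling
// markSomeCapabilities() with i0 = 0 and delta = 1, with spark
// pruning turned off.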
void
markCapabilities (evac_fn evac, void *user)
{
    markSomeCapabilities(evac, user, 0, 1, rtsFalse);
}