/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2003-2006
 *
 * Capabilities
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc.  During
 * STG execution, a pointer to the Capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities;
 * for non-threaded builds there is only one global capability, namely
 * MainCapability.
 *
 * --------------------------------------------------------------------------*/
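/*
 * For orientation only: a rough, illustrative sketch (not part of the
 * build) of how a Task typically obtains and relinquishes a Capability
 * using the entry points defined in this file; error handling and the
 * non-threaded case are omitted:
 *
 *     Capability *cap = NULL;
 *     waitForReturnCapability(&cap, task); // block until we own a Capability
 *     ... run Haskell code; cap->running_task == task here ...
 *     releaseCapability(cap);              // hand it to a returning or spare worker
 */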

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "STM.h"
#include "OSThreads.h"
#include "Capability.h"
#include "Schedule.h"
#include "Sparks.h"
#include "Trace.h"
#include "GC.h"

// One global capability: this is the Capability for non-threaded
// builds, and for +RTS -N1.
Capability MainCapability;

nat n_capabilities;
Capability *capabilities = NULL;

// Holds the Capability which last became free.  This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability;

/* GC indicator, in scope for the scheduler, init'ed to false */
volatile StgWord waiting_for_gc = 0;

#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return blackholes_need_checking
        || sched_state >= SCHED_INTERRUPTING
        ;
}
#endif

#if defined(THREADED_RTS)
StgClosure *
findSpark (Capability *cap)
{
    Capability *robbed;
    StgClosurePtr spark;
    rtsBool retry;
    nat i = 0;

    if (!emptyRunQueue(cap)) {
        // If there are other threads, don't try to run any new
        // sparks: sparks might be speculative, and we don't want to
        // take resources away from the main computation.
        return 0;
    }

    // First try to get a spark from our own pool.
    // We should be using reclaimSpark(), because it works without
    // needing any atomic instructions:
    //   spark = reclaimSpark(cap->sparks);
    // However, measurements show that this makes at least one benchmark
    // slower (prsa) and doesn't affect the others.
    spark = tryStealSpark(cap);
    if (spark != NULL) {
        cap->sparks_converted++;
        return spark;
    }

    if (n_capabilities == 1) { return NULL; } // makes no sense...

    debugTrace(DEBUG_sched,
               "cap %d: Trying to steal work from other capabilities",
               cap->no);

    do {
        retry = rtsFalse;

        /* Visit capabilities 0..n-1 in sequence until a theft succeeds.
           We could start at a random place instead of 0 as well. */
        for ( i=0 ; i < n_capabilities ; i++ ) {
            robbed = &capabilities[i];
            if (cap == robbed)  // ourselves...
                continue;

            if (emptySparkPoolCap(robbed)) // nothing to steal here
                continue;

            spark = tryStealSpark(robbed);
            if (spark == NULL && !emptySparkPoolCap(robbed)) {
                // we conflicted with another thread while trying to steal;
                // try again later.
                retry = rtsTrue;
            }

            if (spark != NULL) {
                debugTrace(DEBUG_sched,
                           "cap %d: Stole a spark from capability %d",
                           cap->no, robbed->no);
                cap->sparks_converted++;
                return spark;
            }
            // otherwise: no success, try next one
        }
    } while (retry);

    debugTrace(DEBUG_sched, "No sparks stolen");
    return NULL;
}
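
// Illustrative note (an assumption about the caller, not part of this
// file): the scheduler typically uses findSpark() along the lines of
//
//     StgClosure *spark = findSpark(cap);
//     if (spark != NULL) { /* turn the spark into a runnable thread */ }
//
// relying on the cap->sparks_converted update above for statistics.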

// Returns true if any spark pool is currently non-empty.  The result
// is only valid for an instant, of course, so in a sense it is
// immediately invalid, and should not be relied upon for correctness.
rtsBool
anySparks (void)
{
    nat i;

    for (i=0; i < n_capabilities; i++) {
        if (!emptySparkPoolCap(&capabilities[i])) {
            return rtsTrue;
        }
    }
    return rtsFalse;
}
#endif

/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock
 * -------------------------------------------------------------------------- */
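//
// They implement a simple FIFO queue: newReturningTask() appends at
// returning_tasks_tl (linking through task->return_link), and
// popReturningTask() removes from returning_tasks_hd.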

#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->return_link == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->return_link == NULL);
        cap->returning_tasks_tl->return_link = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}

STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    ASSERT_LOCK_HELD(&cap->lock);
    Task *task;
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->return_link;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->return_link = NULL;
    return task;
}
#endif

/* ----------------------------------------------------------------------------
 * Initialisation
 *
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */

static void
initCapability( Capability *cap, nat i )
{
    nat g;

    cap->no = i;
    cap->in_haskell = rtsFalse;
    cap->in_gc = rtsFalse;

    cap->run_queue_hd = END_TSO_QUEUE;
    cap->run_queue_tl = END_TSO_QUEUE;

#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task = NULL; // indicates cap is free
    cap->spare_workers = NULL;
    cap->suspended_ccalling_tasks = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->wakeup_queue_hd = END_TSO_QUEUE;
    cap->wakeup_queue_tl = END_TSO_QUEUE;
    cap->sparks_created = 0;
    cap->sparks_converted = 0;
    cap->sparks_pruned = 0;
#endif

    cap->f.stgEagerBlackholeInfo = (W_)&__stg_EAGER_BLACKHOLE_info;
    cap->f.stgGCEnter1 = (F_)__stg_gc_enter_1;
    cap->f.stgGCFun = (F_)__stg_gc_fun;

    cap->mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                    RtsFlags.GcFlags.generations,
                                    "initCapability");
    cap->saved_mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                          RtsFlags.GcFlags.generations,
                                          "initCapability");

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }

    cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks = END_STM_CHUNK_LIST;
    cap->free_trec_headers = NO_TREC;
    cap->transaction_tokens = 0;
    cap->context_switch = 0;
}

/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
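// (For example, a program linked with -threaded and started with
// `+RTS -N2 -RTS` reaches this point with RtsFlags.ParFlags.nNodes == 2
// and therefore ends up with two Capabilities.)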
void
initCapabilities( void )
{
#if defined(THREADED_RTS)
    nat i;

#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
        errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
        RtsFlags.ParFlags.nNodes = 1;
    }
#endif

    n_capabilities = RtsFlags.ParFlags.nNodes;

    if (n_capabilities == 1) {
        capabilities = &MainCapability;
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
    } else {
        capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
                                      "initCapabilities");
    }

    for (i = 0; i < n_capabilities; i++) {
        initCapability(&capabilities[i], i);
    }

    debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);

#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif

    // There are no free capabilities to begin with.  We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}

/* ----------------------------------------------------------------------------
 * setContextSwitches: cause all capabilities to context switch as
 * soon as possible.
 * ------------------------------------------------------------------------- */

void setContextSwitches(void)
{
    nat i;
    for (i=0; i < n_capabilities; i++) {
        contextSwitchCapability(&capabilities[i]);
    }
}

/* ----------------------------------------------------------------------------
 * Give a Capability to a Task.  The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    debugTrace(DEBUG_sched, "passing capability %d to %s %p",
               cap->no, task->tso ? "bound task" : "worker",
               (void *)task->id);
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because signalCondition() doesn't
    // flag the condition if the thread is already running, but we want
    // it to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability. Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap,
                    rtsBool always_wakeup)
{
    Task *task;

    task = cap->running_task;

    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call.
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }

    if (waiting_for_gc == PENDING_GC_SEQ) {
        last_free_capability = cap; // needed?
        debugTrace(DEBUG_sched, "GC pending, set capability %d free", cap->no);
        return;
    }

    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        ASSERT(task != cap->run_queue_hd->bound);
        task = cap->run_queue_hd->bound;
        giveCapabilityToTask(cap,task);
        return;
    }

    if (!cap->spare_workers) {
        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
            debugTrace(DEBUG_sched,
                       "starting new worker on capability %d", cap->no);
            startWorkerTask(cap, workerStart);
            return;
        }
    }

    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (always_wakeup ||
        !emptyRunQueue(cap) || !emptyWakeupQueue(cap) ||
        !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue.
            return;
        }
    }

    last_free_capability = cap;
    debugTrace(DEBUG_sched, "freeing capability %d", cap->no);
}

void
releaseCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsFalse);
    RELEASE_LOCK(&cap->lock);
}

void
releaseAndWakeupCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsTrue);
    RELEASE_LOCK(&cap->lock);
}

static void
releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability.  A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_workers queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    // Also, be careful to check that this task hasn't just exited
    // Haskell to do a foreign call (task->suspended_tso).
    if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
        task->next = cap->spare_workers;
        cap->spare_workers = task;
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap,rtsFalse);

    RELEASE_LOCK(&cap->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * waitForReturnCapability( Task *task )
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
void
waitForReturnCapability (Capability **pCap, Task *task)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            cap = NULL;
            for (i = 0; i < n_capabilities; i++) {
                if (!capabilities[i].running_task) {
                    cap = &capabilities[i];
                    break;
                }
            }
            if (cap == NULL) {
                // Can't find a free one, use last_free_capability.
                cap = last_free_capability;
            }
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }

    ACQUIRE_LOCK(&cap->lock);

    debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);

    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

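        // We may be woken up and still find the Capability busy, or
        // find that a different returning Task is at the head of the
        // queue; in either case we pass the Capability on if
        // appropriate and go back to waiting.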
        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }

    }

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    debugTrace(DEBUG_sched, "resuming capability %d", cap->no);

    *pCap = cap;
#endif
}

#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */
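//
// A rough usage sketch (an assumption about the caller, not part of
// this file): a Task that owns a Capability but has nothing useful to
// do calls
//
//     yieldCapability(&cap, task);
//
// and blocks until it owns a Capability again.  On return, cap may be
// a different Capability from the one passed in, because task->cap may
// have been changed while we slept (see the migration notes above
// giveCapabilityToTask()).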

void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

    if (waiting_for_gc == PENDING_GC_PAR) {
        debugTrace(DEBUG_sched, "capability %d: becoming a GC thread", cap->no);
        postEvent(cap, EVENT_GC_START, 0, 0);
        gcWorkerThread(cap);
        postEvent(cap, EVENT_GC_END, 0, 0);
        return;
    }

    debugTrace(DEBUG_sched, "giving up capability %d", cap->no);

    // We must now release the capability and wait to be woken up
    // again.
    task->wakeup = rtsFalse;
    releaseCapabilityAndQueueWorker(cap);

    for (;;) {
        ACQUIRE_LOCK(&task->lock);
        // task->lock held, cap->lock not held
        if (!task->wakeup) waitCondition(&task->cond, &task->lock);
        cap = task->cap;
        task->wakeup = rtsFalse;
        RELEASE_LOCK(&task->lock);

        debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);

        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task != NULL) {
            debugTrace(DEBUG_sched,
                       "capability %d is owned by another task", cap->no);
            RELEASE_LOCK(&cap->lock);
            continue;
        }

        if (task->tso == NULL) {
            ASSERT(cap->spare_workers != NULL);
            // if we're not at the front of the queue, release it
            // again.  This is unlikely to happen.
            if (cap->spare_workers != task) {
                giveCapabilityToTask(cap,cap->spare_workers);
                RELEASE_LOCK(&cap->lock);
                continue;
            }
            cap->spare_workers = task->next;
            task->next = NULL;
        }
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
        break;
    }

    debugTrace(DEBUG_sched, "resuming capability %d", cap->no);
    ASSERT(cap->running_task == task);

    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    return;
}

/* ----------------------------------------------------------------------------
 * Wake up a thread on a Capability.
 *
 * This is used when the current Task is running on a Capability and
 * wishes to wake up a thread on a different Capability.
 * ------------------------------------------------------------------------- */
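//
// The caller is running on my_cap; other_cap->lock is acquired and
// released inside this function.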

void
wakeupThreadOnCapability (Capability *my_cap,
                          Capability *other_cap,
                          StgTSO *tso)
{
    ACQUIRE_LOCK(&other_cap->lock);

    // other_cap->lock is held at this point (acquired just above)
    if (tso->bound) {
        ASSERT(tso->bound->cap == tso->cap);
        tso->bound->cap = other_cap;
    }
    tso->cap = other_cap;

    ASSERT(tso->bound ? tso->bound->cap == other_cap : 1);

    if (other_cap->running_task == NULL) {
        // nobody is running this Capability, we can add our thread
        // directly onto the run queue and start up a Task to run it.

        other_cap->running_task = myTask();
        // precond for releaseCapability_() and appendToRunQueue()

        appendToRunQueue(other_cap,tso);

        releaseCapability_(other_cap,rtsFalse);
    } else {
        appendToWakeupQueue(my_cap,other_cap,tso);
        other_cap->context_switch = 1;
        // someone is running on this Capability, so it cannot be
        // freed without first checking the wakeup queue (see
        // releaseCapability_).
    }

    RELEASE_LOCK(&other_cap->lock);
}

/* ----------------------------------------------------------------------------
 * prodCapability
 *
 * If a Capability is currently idle, wake up a Task on it.  Used to
 * get every Capability into the GC.
 * ------------------------------------------------------------------------- */

void
prodCapability (Capability *cap, Task *task)
{
    ACQUIRE_LOCK(&cap->lock);
    if (!cap->running_task) {
        cap->running_task = task;
        releaseCapability_(cap,rtsTrue);
    }
    RELEASE_LOCK(&cap->lock);
}

/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible.  For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, thus any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller.  Runnable threads are
 * killed.
 *
 * ------------------------------------------------------------------------- */

void
shutdownCapability (Capability *cap, Task *task, rtsBool safe)
{
    nat i;

    task->cap = cap;

    // Loop indefinitely until all the workers have exited and there
    // are no Haskell threads left.  We used to bail out after 50
    // iterations of this loop, but that occasionally left a worker
    // running which caused problems later (the closeMutex() below
    // isn't safe, for one thing).

    for (i = 0; /* i < 50 */; i++) {
        ASSERT(sched_state == SCHED_SHUTTING_DOWN);

        debugTrace(DEBUG_sched,
                   "shutting down capability %d, attempt %d", cap->no, i);
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            debugTrace(DEBUG_sched, "not owner, yielding");
            yieldThread();
            continue;
        }
        cap->running_task = task;

        if (cap->spare_workers) {
            // Look for workers that have died without removing
            // themselves from the list; this could happen if the OS
            // summarily killed the thread, for example.  This
            // actually happens on Windows when the system is
            // terminating the program, and the RTS is running in a
            // DLL.
            Task *t, *prev;
            prev = NULL;
            for (t = cap->spare_workers; t != NULL; t = t->next) {
                if (!osThreadIsAlive(t->id)) {
                    debugTrace(DEBUG_sched,
                               "worker thread %p has died unexpectedly", (void *)t->id);
                    if (!prev) {
                        cap->spare_workers = t->next;
                    } else {
                        prev->next = t->next;
                    }
                } else {
                    prev = t;
                }
            }
        }

        if (!emptyRunQueue(cap) || cap->spare_workers) {
            debugTrace(DEBUG_sched,
                       "runnable threads or workers still alive, yielding");
            releaseCapability_(cap,rtsFalse); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        // If "safe", then busy-wait for any threads currently doing
        // foreign calls.  If we're about to unload this DLL, for
        // example, we need to be sure that there are no OS threads
        // that will try to return to code that has been unloaded.
        // We can be a bit more relaxed when this is a standalone
        // program that is about to terminate, and let safe=false.
        if (cap->suspended_ccalling_tasks && safe) {
            debugTrace(DEBUG_sched,
                       "thread(s) are involved in foreign calls, yielding");
            cap->running_task = NULL;
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        postEvent(cap, EVENT_SHUTDOWN, 0, 0);
        debugTrace(DEBUG_sched, "capability %d is stopped.", cap->no);
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // We now have the Capability; its run queue and spare_workers
    // list are both empty.

    // ToDo: we can't drop this mutex, because there might still be
    // threads performing foreign calls that will eventually try to
    // return via resumeThread() and attempt to grab cap->lock.
    // closeMutex(&cap->lock);
}

/* ----------------------------------------------------------------------------
 * tryGrabCapability
 *
 * Attempt to gain control of a Capability if it is free.
 *
 * ------------------------------------------------------------------------- */
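//
// Note: the first, unlocked test of cap->running_task below is a cheap
// fast path; the test is repeated under cap->lock before the
// Capability is actually claimed.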

rtsBool
tryGrabCapability (Capability *cap, Task *task)
{
    if (cap->running_task != NULL) return rtsFalse;
    ACQUIRE_LOCK(&cap->lock);
    if (cap->running_task != NULL) {
        RELEASE_LOCK(&cap->lock);
        return rtsFalse;
    }
    task->cap = cap;
    cap->running_task = task;
    RELEASE_LOCK(&cap->lock);
    return rtsTrue;
}


#endif /* THREADED_RTS */

static void
freeCapability (Capability *cap)
{
    stgFree(cap->mut_lists);
#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
    freeSparkPool(cap->sparks);
#endif
}

void
freeCapabilities (void)
{
#if defined(THREADED_RTS)
    nat i;
    for (i=0; i < n_capabilities; i++) {
        freeCapability(&capabilities[i]);
    }
#else
    freeCapability(&MainCapability);
#endif
}

/* ---------------------------------------------------------------------------
   Mark everything directly reachable from the Capabilities.  When
   using multiple GC threads, each GC thread n marks the Capabilities
   whose number c satisfies (c mod N == n), where N is the number of
   GC threads.
   ------------------------------------------------------------------------ */

void
markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
                      rtsBool prune_sparks USED_IF_THREADS)
{
    nat i;
    Capability *cap;
    Task *task;

    // Each GC thread is responsible for following roots from the
    // Capability of the same number.  There will usually be the same
    // number of Capabilities as GC threads (or fewer), but just in
    // case there are more, we mark every Capability whose number is
    // the GC thread's index plus a multiple of the number of GC
    // threads.
    for (i = i0; i < n_capabilities; i += delta) {
        cap = &capabilities[i];
        evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_tl);
#endif
        for (task = cap->suspended_ccalling_tasks; task != NULL;
             task=task->next) {
            debugTrace(DEBUG_sched,
                       "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
            evac(user, (StgClosure **)(void *)&task->suspended_tso);
        }

#if defined(THREADED_RTS)
        if (prune_sparks) {
            pruneSparkQueue (evac, user, cap);
        } else {
            traverseSparkQueue (evac, user, cap);
        }
#endif
    }

#if !defined(THREADED_RTS)
    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
    evac(user, (StgClosure **)(void *)&sleeping_queue);
#endif
}

void
markCapabilities (evac_fn evac, void *user)
{
    markSomeCapabilities(evac, user, 0, 1, rtsFalse);
}