rts/Capability.c
1 /* ---------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 2003-2006
4 *
5 * Capabilities
6 *
7 * A Capability represents the token required to execute STG code,
8 * and all the state an OS thread/task needs to run Haskell code:
9 * its STG registers, a pointer to its TSO, a nursery etc. During
10 * STG execution, a pointer to the capability is kept in a
11 * register (BaseReg; actually it is a pointer to cap->r).
12 *
13 * Only in a THREADED_RTS build will there be multiple capabilities;
14 * in non-threaded builds there is only one global capability, namely
15 * MainCapability.
16 *
17 * --------------------------------------------------------------------------*/
18
19 #include "PosixSource.h"
20 #include "Rts.h"
21
22 #include "Capability.h"
23 #include "Schedule.h"
24 #include "Sparks.h"
25 #include "Trace.h"
26 #include "sm/GC.h" // for gcWorkerThread()
27 #include "STM.h"
28 #include "RtsUtils.h"
29
30 // One global capability: this is the Capability for non-threaded
31 // builds, and for threaded builds with +RTS -N1
32 Capability MainCapability;
33
34 nat n_capabilities = 0;
35 Capability *capabilities = NULL;
36
37 // Holds the Capability which last became free. This is used so that
38 // an in-call has a chance of quickly finding a free Capability.
39 // Maintaining a global free list of Capabilities would require global
40 // locking, so we don't do that.
41 Capability *last_free_capability = NULL;
42
43 /* GC indicator, in scope for the scheduler; initialised to false */
44 volatile StgWord waiting_for_gc = 0;
45
46 /* Let foreign code get the current Capability -- assuming there is one!
47 * This is useful for unsafe foreign calls because they are called with
48 * the current Capability held, but they are not passed it. For example,
49 * see the integer-gmp package, which calls allocateLocal() in its
50 * stgAllocForGMP() function (which gets called by gmp functions).
51 */
52 Capability * rts_unsafeGetMyCapability (void)
53 {
54 #if defined(THREADED_RTS)
55 return myTask()->cap;
56 #else
57 return &MainCapability;
58 #endif
59 }
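/* Illustrative sketch only (not part of this file): how an unsafe foreign
 * call might use rts_unsafeGetMyCapability() to allocate from the current
 * Capability, roughly in the style of integer-gmp's stgAllocForGMP().
 * The exact allocateLocal() signature (Capability *, size in words) is an
 * assumption here.
 *
 *   static void *
 *   exampleForeignAlloc (size_t size_in_bytes)
 *   {
 *       Capability *cap = rts_unsafeGetMyCapability();
 *       nat n_words = (size_in_bytes + sizeof(W_) - 1) / sizeof(W_);
 *       return allocateLocal(cap, n_words);
 *   }
 */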
60
61 #if defined(THREADED_RTS)
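// Is there any global (i.e. not Capability-local) work that an otherwise
// idle Capability should wake up for?  Currently this means pending
// blackhole checking, or the scheduler being interrupted or shut down.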
62 STATIC_INLINE rtsBool
63 globalWorkToDo (void)
64 {
65 return blackholes_need_checking
66 || sched_state >= SCHED_INTERRUPTING
67 ;
68 }
69 #endif
70
71 #if defined(THREADED_RTS)
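// Try to find a spark to run: first look in this Capability's own pool,
// then try to steal from the other Capabilities.  Returns NULL if no
// spark was found, or if this Capability already has runnable threads.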
72 StgClosure *
73 findSpark (Capability *cap)
74 {
75 Capability *robbed;
76 StgClosurePtr spark;
77 rtsBool retry;
78 nat i = 0;
79
80 if (!emptyRunQueue(cap)) {
81 // If there are other threads, don't try to run any new
82 // sparks: sparks might be speculative, and we don't want to take
83 // resources away from the main computation.
84 return 0;
85 }
86
87 // first try to get a spark from our own pool.
88 // We should be using reclaimSpark(), because it works without
89 // needing any atomic instructions:
90 // spark = reclaimSpark(cap->sparks);
91 // However, measurements show that this makes at least one benchmark
92 // slower (prsa) and doesn't affect the others.
93 spark = tryStealSpark(cap);
94 if (spark != NULL) {
95 cap->sparks_converted++;
96
97 // Post event for running a spark from capability's own pool.
98 traceSchedEvent(cap, EVENT_RUN_SPARK, cap->r.rCurrentTSO, 0);
99
100 return spark;
101 }
102
103 if (n_capabilities == 1) { return NULL; } // makes no sense...
104
105 debugTrace(DEBUG_sched,
106 "cap %d: Trying to steal work from other capabilities",
107 cap->no);
108
109 do {
110 retry = rtsFalse;
111
112 /* Visit Capabilities 0..n-1 in sequence until a theft succeeds; we
113 could just as well start at a random place instead of 0. */
114 for ( i=0 ; i < n_capabilities ; i++ ) {
115 robbed = &capabilities[i];
116 if (cap == robbed) // ourselves...
117 continue;
118
119 if (emptySparkPoolCap(robbed)) // nothing to steal here
120 continue;
121
122 spark = tryStealSpark(robbed);
123 if (spark == NULL && !emptySparkPoolCap(robbed)) {
124 // we conflicted with another thread while trying to steal;
125 // try again later.
126 retry = rtsTrue;
127 }
128
129 if (spark != NULL) {
130 cap->sparks_converted++;
131
132 traceSchedEvent(cap, EVENT_STEAL_SPARK,
133 cap->r.rCurrentTSO, robbed->no);
134
135 return spark;
136 }
137 // otherwise: no success, try next one
138 }
139 } while (retry);
140
141 debugTrace(DEBUG_sched, "No sparks stolen");
142 return NULL;
143 }
144
145 // Returns rtsTrue if any spark pool is non-empty at this moment in time.
146 // The result is only valid for an instant, of course, so in a sense it
147 // is immediately invalid, and should not be relied upon for
148 // correctness.
149 rtsBool
150 anySparks (void)
151 {
152 nat i;
153
154 for (i=0; i < n_capabilities; i++) {
155 if (!emptySparkPoolCap(&capabilities[i])) {
156 return rtsTrue;
157 }
158 }
159 return rtsFalse;
160 }
161 #endif
162
163 /* -----------------------------------------------------------------------------
164 * Manage the returning_tasks lists.
165 *
166 * These functions require cap->lock
167 * -------------------------------------------------------------------------- */
168
169 #if defined(THREADED_RTS)
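// returning_tasks is a singly-linked FIFO queue: newReturningTask()
// appends at the tail, popReturningTask() removes from the head.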
170 STATIC_INLINE void
171 newReturningTask (Capability *cap, Task *task)
172 {
173 ASSERT_LOCK_HELD(&cap->lock);
174 ASSERT(task->return_link == NULL);
175 if (cap->returning_tasks_hd) {
176 ASSERT(cap->returning_tasks_tl->return_link == NULL);
177 cap->returning_tasks_tl->return_link = task;
178 } else {
179 cap->returning_tasks_hd = task;
180 }
181 cap->returning_tasks_tl = task;
182 }
183
184 STATIC_INLINE Task *
185 popReturningTask (Capability *cap)
186 {
187 ASSERT_LOCK_HELD(&cap->lock);
188 Task *task;
189 task = cap->returning_tasks_hd;
190 ASSERT(task);
191 cap->returning_tasks_hd = task->return_link;
192 if (!cap->returning_tasks_hd) {
193 cap->returning_tasks_tl = NULL;
194 }
195 task->return_link = NULL;
196 return task;
197 }
198 #endif
199
200 /* ----------------------------------------------------------------------------
201 * Initialisation
202 *
203 * The Capability is initially marked not free.
204 * ------------------------------------------------------------------------- */
205
206 static void
207 initCapability( Capability *cap, nat i )
208 {
209 nat g;
210
211 cap->no = i;
212 cap->in_haskell = rtsFalse;
213 cap->in_gc = rtsFalse;
214
215 cap->run_queue_hd = END_TSO_QUEUE;
216 cap->run_queue_tl = END_TSO_QUEUE;
217
218 #if defined(THREADED_RTS)
219 initMutex(&cap->lock);
220 cap->running_task = NULL; // indicates cap is free
221 cap->spare_workers = NULL;
222 cap->suspended_ccalling_tasks = NULL;
223 cap->returning_tasks_hd = NULL;
224 cap->returning_tasks_tl = NULL;
225 cap->wakeup_queue_hd = END_TSO_QUEUE;
226 cap->wakeup_queue_tl = END_TSO_QUEUE;
227 cap->sparks_created = 0;
228 cap->sparks_converted = 0;
229 cap->sparks_pruned = 0;
230 #endif
231
232 cap->f.stgEagerBlackholeInfo = (W_)&__stg_EAGER_BLACKHOLE_info;
233 cap->f.stgGCEnter1 = (StgFunPtr)__stg_gc_enter_1;
234 cap->f.stgGCFun = (StgFunPtr)__stg_gc_fun;
235
236 cap->mut_lists = stgMallocBytes(sizeof(bdescr *) *
237 RtsFlags.GcFlags.generations,
238 "initCapability");
239 cap->saved_mut_lists = stgMallocBytes(sizeof(bdescr *) *
240 RtsFlags.GcFlags.generations,
241 "initCapability");
242
243 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
244 cap->mut_lists[g] = NULL;
245 }
246
247 cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
248 cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
249 cap->free_trec_chunks = END_STM_CHUNK_LIST;
250 cap->free_trec_headers = NO_TREC;
251 cap->transaction_tokens = 0;
252 cap->context_switch = 0;
253 }
254
255 /* ---------------------------------------------------------------------------
256 * Function: initCapabilities()
257 *
258 * Purpose: set up the Capability handling. For the THREADED_RTS build,
259 * we keep a table of them, the size of which is
260 * controlled by the user via the RTS flag -N.
261 *
262 * ------------------------------------------------------------------------- */
263 void
264 initCapabilities( void )
265 {
266 #if defined(THREADED_RTS)
267 nat i;
268
269 #ifndef REG_Base
270 // We can't support multiple CPUs if BaseReg is not a register
271 if (RtsFlags.ParFlags.nNodes > 1) {
272 errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
273 RtsFlags.ParFlags.nNodes = 1;
274 }
275 #endif
276
277 n_capabilities = RtsFlags.ParFlags.nNodes;
278
279 if (n_capabilities == 1) {
280 capabilities = &MainCapability;
281 // THREADED_RTS must work on builds that don't have a mutable
282 // BaseReg (eg. unregisterised), so in this case
283 // capabilities[0] must coincide with &MainCapability.
284 } else {
285 capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
286 "initCapabilities");
287 }
288
289 for (i = 0; i < n_capabilities; i++) {
290 initCapability(&capabilities[i], i);
291 }
292
293 debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);
294
295 #else /* !THREADED_RTS */
296
297 n_capabilities = 1;
298 capabilities = &MainCapability;
299 initCapability(&MainCapability, 0);
300
301 #endif
302
303 // There are no free capabilities to begin with. We will start
304 // a worker Task on each Capability, which will quickly put the
305 // Capability on the free list when it finds nothing to do.
306 last_free_capability = &capabilities[0];
307 }
308
309 /* ----------------------------------------------------------------------------
310 * setContextSwitches: cause all capabilities to context switch as
311 * soon as possible.
312 * ------------------------------------------------------------------------- */
313
314 void setContextSwitches(void)
315 {
316 nat i;
317 for (i=0; i < n_capabilities; i++) {
318 contextSwitchCapability(&capabilities[i]);
319 }
320 }
321
322 /* ----------------------------------------------------------------------------
323 * Give a Capability to a Task. The task must currently be sleeping
324 * on its condition variable.
325 *
326 * Requires cap->lock (modifies cap->running_task).
327 *
328 * When migrating a Task, the migrator must take task->lock before
329 * modifying task->cap, to synchronise with the waking up Task.
330 * Additionally, the migrator should own the Capability (when
331 * migrating the run queue), or cap->lock (when migrating
332 * returning_workers).
333 *
334 * ------------------------------------------------------------------------- */
335
336 #if defined(THREADED_RTS)
337 STATIC_INLINE void
338 giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
339 {
340 ASSERT_LOCK_HELD(&cap->lock);
341 ASSERT(task->cap == cap);
342 debugTrace(DEBUG_sched, "passing capability %d to %s %p",
343 cap->no, task->tso ? "bound task" : "worker",
344 (void *)task->id);
345 ACQUIRE_LOCK(&task->lock);
346 task->wakeup = rtsTrue;
347 // the wakeup flag is needed because signalCondition() doesn't
348 // flag the condition if the thread is already running, but we want
349 // it to be sticky.
350 signalCondition(&task->cond);
351 RELEASE_LOCK(&task->lock);
352 }
353 #endif
354
355 /* ----------------------------------------------------------------------------
356 * Function: releaseCapability(Capability*)
357 *
358 * Purpose: Letting go of a capability. Causes a
359 * 'returning worker' thread or a 'waiting worker'
360 * to wake up, in that order.
361 * ------------------------------------------------------------------------- */
362
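// In more detail, releaseCapability_() hands the Capability on in this order:
//   1. a Task returning from a foreign call (the returning_tasks queue);
//   2. nobody, if a sequential GC is pending (the GC will claim the free cap);
//   3. the bound Task of the thread at the head of the run queue;
//   4. a newly started worker, if there are no spare workers to wake
//      (unless the RTS is shutting down);
//   5. a spare worker, if there is work to do (or always_wakeup is set);
//   6. otherwise the Capability is recorded in last_free_capability.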
363 #if defined(THREADED_RTS)
364 void
365 releaseCapability_ (Capability* cap,
366 rtsBool always_wakeup)
367 {
368 Task *task;
369
370 task = cap->running_task;
371
372 ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);
373
374 cap->running_task = NULL;
375
376 // Check to see whether a worker thread can be given
377 // the go-ahead to return the result of an external call.
378 if (cap->returning_tasks_hd != NULL) {
379 giveCapabilityToTask(cap,cap->returning_tasks_hd);
380 // The Task pops itself from the queue (see waitForReturnCapability())
381 return;
382 }
383
384 if (waiting_for_gc == PENDING_GC_SEQ) {
385 last_free_capability = cap; // needed?
386 debugTrace(DEBUG_sched, "GC pending, set capability %d free", cap->no);
387 return;
388 }
389
390
391 // If the next thread on the run queue is a bound thread,
392 // give this Capability to the appropriate Task.
393 if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
394 // Make sure we're not about to try to wake ourselves up
395 ASSERT(task != cap->run_queue_hd->bound);
396 task = cap->run_queue_hd->bound;
397 giveCapabilityToTask(cap,task);
398 return;
399 }
400
401 if (!cap->spare_workers) {
402 // Create a worker thread if we don't have one. If the system
403 // is interrupted, we only create a worker task if there
404 // are threads that need to be completed. If the system is
405 // shutting down, we never create a new worker.
406 if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
407 debugTrace(DEBUG_sched,
408 "starting new worker on capability %d", cap->no);
409 startWorkerTask(cap, workerStart);
410 return;
411 }
412 }
413
414 // If we have an unbound thread on the run queue, or if there's
415 // anything else to do, give the Capability to a worker thread.
416 if (always_wakeup ||
417 !emptyRunQueue(cap) || !emptyWakeupQueue(cap) ||
418 !emptySparkPoolCap(cap) || globalWorkToDo()) {
419 if (cap->spare_workers) {
420 giveCapabilityToTask(cap,cap->spare_workers);
421 // The worker Task pops itself from the queue;
422 return;
423 }
424 }
425
426 last_free_capability = cap;
427 debugTrace(DEBUG_sched, "freeing capability %d", cap->no);
428 }
429
430 void
431 releaseCapability (Capability* cap USED_IF_THREADS)
432 {
433 ACQUIRE_LOCK(&cap->lock);
434 releaseCapability_(cap, rtsFalse);
435 RELEASE_LOCK(&cap->lock);
436 }
437
438 void
439 releaseAndWakeupCapability (Capability* cap USED_IF_THREADS)
440 {
441 ACQUIRE_LOCK(&cap->lock);
442 releaseCapability_(cap, rtsTrue);
443 RELEASE_LOCK(&cap->lock);
444 }
445
446 static void
447 releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
448 {
449 Task *task;
450
451 ACQUIRE_LOCK(&cap->lock);
452
453 task = cap->running_task;
454
455 // If the current task is a worker, save it on the spare_workers
456 // list of this Capability. A worker can mark itself as stopped,
457 // in which case it is not replaced on the spare_worker queue.
458 // This happens when the system is shutting down (see
459 // Schedule.c:workerStart()).
460 // Also, be careful to check that this task hasn't just exited
461 // Haskell to do a foreign call (task->suspended_tso).
462 if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
463 task->next = cap->spare_workers;
464 cap->spare_workers = task;
465 }
466 // Bound tasks just float around attached to their TSOs.
467
468 releaseCapability_(cap,rtsFalse);
469
470 RELEASE_LOCK(&cap->lock);
471 }
472 #endif
473
474 /* ----------------------------------------------------------------------------
475 * waitForReturnCapability( Task *task )
476 *
477 * Purpose: when an OS thread returns from an external call,
478 * it calls waitForReturnCapability() (via Schedule.resumeThread())
479 * to wait for permission to enter the RTS & communicate the
480 * result of the external call back to the Haskell thread that
481 * made it.
482 *
483 * ------------------------------------------------------------------------- */
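// In the threaded RTS: if the Task has no Capability yet, pick one
// (preferably a free one, starting from last_free_capability).  Then either
// take the Capability directly if it is free, or join its returning_tasks
// queue and block on task->cond until the Capability is handed to us.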
484 void
485 waitForReturnCapability (Capability **pCap, Task *task)
486 {
487 #if !defined(THREADED_RTS)
488
489 MainCapability.running_task = task;
490 task->cap = &MainCapability;
491 *pCap = &MainCapability;
492
493 #else
494 Capability *cap = *pCap;
495
496 if (cap == NULL) {
497 // Try last_free_capability first
498 cap = last_free_capability;
499 if (cap->running_task) {
500 nat i;
501 // otherwise, search for a free capability
502 cap = NULL;
503 for (i = 0; i < n_capabilities; i++) {
504 if (!capabilities[i].running_task) {
505 cap = &capabilities[i];
506 break;
507 }
508 }
509 if (cap == NULL) {
510 // Can't find a free one, use last_free_capability.
511 cap = last_free_capability;
512 }
513 }
514
515 // record the Capability as the one this Task is now associated with.
516 task->cap = cap;
517
518 } else {
519 ASSERT(task->cap == cap);
520 }
521
522 ACQUIRE_LOCK(&cap->lock);
523
524 debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);
525
526 if (!cap->running_task) {
527 // It's free; just grab it
528 cap->running_task = task;
529 RELEASE_LOCK(&cap->lock);
530 } else {
531 newReturningTask(cap,task);
532 RELEASE_LOCK(&cap->lock);
533
534 for (;;) {
535 ACQUIRE_LOCK(&task->lock);
536 // task->lock held, cap->lock not held
537 if (!task->wakeup) waitCondition(&task->cond, &task->lock);
538 cap = task->cap;
539 task->wakeup = rtsFalse;
540 RELEASE_LOCK(&task->lock);
541
542 // now check whether we should wake up...
543 ACQUIRE_LOCK(&cap->lock);
544 if (cap->running_task == NULL) {
545 if (cap->returning_tasks_hd != task) {
546 giveCapabilityToTask(cap,cap->returning_tasks_hd);
547 RELEASE_LOCK(&cap->lock);
548 continue;
549 }
550 cap->running_task = task;
551 popReturningTask(cap);
552 RELEASE_LOCK(&cap->lock);
553 break;
554 }
555 RELEASE_LOCK(&cap->lock);
556 }
557
558 }
559
560 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
561
562 debugTrace(DEBUG_sched, "resuming capability %d", cap->no);
563
564 *pCap = cap;
565 #endif
566 }
567
568 #if defined(THREADED_RTS)
569 /* ----------------------------------------------------------------------------
570 * yieldCapability
571 * ------------------------------------------------------------------------- */
572
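// Give up the Capability and block until it (or possibly a different
// Capability, if this Task has been migrated) is handed back to us.
// If a parallel GC is pending we instead help out as a GC worker and
// return immediately, still holding the Capability.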
573 void
574 yieldCapability (Capability** pCap, Task *task)
575 {
576 Capability *cap = *pCap;
577
578 if (waiting_for_gc == PENDING_GC_PAR) {
579 traceSchedEvent(cap, EVENT_GC_START, 0, 0);
580 gcWorkerThread(cap);
581 traceSchedEvent(cap, EVENT_GC_END, 0, 0);
582 return;
583 }
584
585 debugTrace(DEBUG_sched, "giving up capability %d", cap->no);
586
587 // We must now release the capability and wait to be woken up
588 // again.
589 task->wakeup = rtsFalse;
590 releaseCapabilityAndQueueWorker(cap);
591
592 for (;;) {
593 ACQUIRE_LOCK(&task->lock);
594 // task->lock held, cap->lock not held
595 if (!task->wakeup) waitCondition(&task->cond, &task->lock);
596 cap = task->cap;
597 task->wakeup = rtsFalse;
598 RELEASE_LOCK(&task->lock);
599
600 debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);
601
602 ACQUIRE_LOCK(&cap->lock);
603 if (cap->running_task != NULL) {
604 debugTrace(DEBUG_sched,
605 "capability %d is owned by another task", cap->no);
606 RELEASE_LOCK(&cap->lock);
607 continue;
608 }
609
610 if (task->tso == NULL) {
611 ASSERT(cap->spare_workers != NULL);
612 // if we're not at the front of the queue, release it
613 // again. This is unlikely to happen.
614 if (cap->spare_workers != task) {
615 giveCapabilityToTask(cap,cap->spare_workers);
616 RELEASE_LOCK(&cap->lock);
617 continue;
618 }
619 cap->spare_workers = task->next;
620 task->next = NULL;
621 }
622 cap->running_task = task;
623 RELEASE_LOCK(&cap->lock);
624 break;
625 }
626
627 debugTrace(DEBUG_sched, "resuming capability %d", cap->no);
628 ASSERT(cap->running_task == task);
629
630 *pCap = cap;
631
632 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
633
634 return;
635 }
636
637 /* ----------------------------------------------------------------------------
638 * Wake up a thread on a Capability.
639 *
640 * This is used when the current Task is running on a Capability and
641 * wishes to wake up a thread on a different Capability.
642 * ------------------------------------------------------------------------- */
643
644 void
645 wakeupThreadOnCapability (Capability *my_cap,
646 Capability *other_cap,
647 StgTSO *tso)
648 {
649 ACQUIRE_LOCK(&other_cap->lock);
650
651 // ASSUMES: other_cap->lock is held (acquired just above)
652 if (tso->bound) {
653 ASSERT(tso->bound->cap == tso->cap);
654 tso->bound->cap = other_cap;
655 }
656 tso->cap = other_cap;
657
658 ASSERT(tso->bound ? tso->bound->cap == other_cap : 1);
659
660 if (other_cap->running_task == NULL) {
661 // nobody is running this Capability, we can add our thread
662 // directly onto the run queue and start up a Task to run it.
663
664 other_cap->running_task = myTask();
665 // precond for releaseCapability_() and appendToRunQueue()
666
667 appendToRunQueue(other_cap,tso);
668
669 releaseCapability_(other_cap,rtsFalse);
670 } else {
671 appendToWakeupQueue(my_cap,other_cap,tso);
672 other_cap->context_switch = 1;
673 // someone is running on this Capability, so it cannot be
674 // freed without first checking the wakeup queue (see
675 // releaseCapability_).
676 }
677
678 RELEASE_LOCK(&other_cap->lock);
679 }
680
681 /* ----------------------------------------------------------------------------
682 * prodCapability
683 *
684 * If a Capability is currently idle, wake up a Task on it. Used to
685 * get every Capability into the GC.
686 * ------------------------------------------------------------------------- */
687
688 void
689 prodCapability (Capability *cap, Task *task)
690 {
691 ACQUIRE_LOCK(&cap->lock);
692 if (!cap->running_task) {
693 cap->running_task = task;
694 releaseCapability_(cap,rtsTrue);
695 }
696 RELEASE_LOCK(&cap->lock);
697 }
698
699 /* ----------------------------------------------------------------------------
700 * shutdownCapability
701 *
702 * At shutdown time, we want to let everything exit as cleanly as
703 * possible. For each capability, we let its run queue drain, and
704 * allow the workers to stop.
705 *
706 * This function should be called when the RTS is interrupted and
707 * shutting_down_scheduler is rtsTrue, so any worker that wakes up
708 * will exit the scheduler and call taskStop(), and any bound thread
709 * that wakes up will return to its caller. Runnable threads are
710 * killed.
711 *
712 * ------------------------------------------------------------------------- */
713
714 void
715 shutdownCapability (Capability *cap, Task *task, rtsBool safe)
716 {
717 nat i;
718
719 task->cap = cap;
720
721 // Loop indefinitely until all the workers have exited and there
722 // are no Haskell threads left. We used to bail out after 50
723 // iterations of this loop, but that occasionally left a worker
724 // running which caused problems later (the closeMutex() below
725 // isn't safe, for one thing).
726
727 for (i = 0; /* i < 50 */; i++) {
728 ASSERT(sched_state == SCHED_SHUTTING_DOWN);
729
730 debugTrace(DEBUG_sched,
731 "shutting down capability %d, attempt %d", cap->no, i);
732 ACQUIRE_LOCK(&cap->lock);
733 if (cap->running_task) {
734 RELEASE_LOCK(&cap->lock);
735 debugTrace(DEBUG_sched, "not owner, yielding");
736 yieldThread();
737 continue;
738 }
739 cap->running_task = task;
740
741 if (cap->spare_workers) {
742 // Look for workers that have died without removing
743 // themselves from the list; this could happen if the OS
744 // summarily killed the thread, for example. This
745 // actually happens on Windows when the system is
746 // terminating the program, and the RTS is running in a
747 // DLL.
748 Task *t, *prev;
749 prev = NULL;
750 for (t = cap->spare_workers; t != NULL; t = t->next) {
751 if (!osThreadIsAlive(t->id)) {
752 debugTrace(DEBUG_sched,
753 "worker thread %p has died unexpectedly", (void *)t->id);
754 // unlink the dead worker; prev stays at the last live one
755 if (!prev) {
756 cap->spare_workers = t->next;
757 } else {
758 prev->next = t->next;
759 }
760 } else { prev = t; } // only advance prev past workers we keep
761 }
762 }
763
764 if (!emptyRunQueue(cap) || cap->spare_workers) {
765 debugTrace(DEBUG_sched,
766 "runnable threads or workers still alive, yielding");
767 releaseCapability_(cap,rtsFalse); // this will wake up a worker
768 RELEASE_LOCK(&cap->lock);
769 yieldThread();
770 continue;
771 }
772
773 // If "safe", then busy-wait for any threads currently doing
774 // foreign calls. If we're about to unload this DLL, for
775 // example, we need to be sure that there are no OS threads
776 // that will try to return to code that has been unloaded.
777 // We can be a bit more relaxed when this is a standalone
778 // program that is about to terminate, and let safe=false.
779 if (cap->suspended_ccalling_tasks && safe) {
780 debugTrace(DEBUG_sched,
781 "thread(s) are involved in foreign calls, yielding");
782 cap->running_task = NULL;
783 RELEASE_LOCK(&cap->lock);
784 yieldThread();
785 continue;
786 }
787
788 traceSchedEvent(cap, EVENT_SHUTDOWN, 0, 0);
789 RELEASE_LOCK(&cap->lock);
790 break;
791 }
792 // We now have the Capability; its run queue and spare_workers
793 // list are both empty.
794
795 // ToDo: we can't drop this mutex, because there might still be
796 // threads performing foreign calls that will eventually try to
797 // return via resumeThread() and attempt to grab cap->lock.
798 // closeMutex(&cap->lock);
799 }
800
801 /* ----------------------------------------------------------------------------
802 * tryGrabCapability
803 *
804 * Attempt to gain control of a Capability if it is free.
805 *
806 * ------------------------------------------------------------------------- */
807
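// Note: cap->running_task is read once without cap->lock as a cheap
// fast-path test, and then re-checked under the lock before the
// Capability is actually claimed.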
808 rtsBool
809 tryGrabCapability (Capability *cap, Task *task)
810 {
811 if (cap->running_task != NULL) return rtsFalse;
812 ACQUIRE_LOCK(&cap->lock);
813 if (cap->running_task != NULL) {
814 RELEASE_LOCK(&cap->lock);
815 return rtsFalse;
816 }
817 task->cap = cap;
818 cap->running_task = task;
819 RELEASE_LOCK(&cap->lock);
820 return rtsTrue;
821 }
822
823
824 #endif /* THREADED_RTS */
825
826 static void
827 freeCapability (Capability *cap)
828 {
829 stgFree(cap->mut_lists);
830 #if defined(THREADED_RTS)
831 freeSparkPool(cap->sparks);
832 #endif
833 }
834
835 void
836 freeCapabilities (void)
837 {
838 #if defined(THREADED_RTS)
839 nat i;
840 for (i=0; i < n_capabilities; i++) {
841 freeCapability(&capabilities[i]);
842 }
843 #else
844 freeCapability(&MainCapability);
845 #endif
846 }
847
848 /* ---------------------------------------------------------------------------
849 Mark everything directly reachable from the Capabilities. When
850 using multiple GC threads, GC thread t marks every Capability c
851 for which (c `mod` n == t), where n is the number of GC threads.
852 ------------------------------------------------------------------------ */
853
854 void
855 markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
856 rtsBool prune_sparks USED_IF_THREADS)
857 {
858 nat i;
859 Capability *cap;
860 Task *task;
861
862 // Each GC thread is responsible for following roots from the
863 // Capability of the same number. There will usually be the same
864 // number of Capabilities as GC threads, or fewer, but just in case there
865 // are more, we mark every Capability whose number is the GC
866 // thread's index plus a multiple of the number of GC threads.
867 for (i = i0; i < n_capabilities; i += delta) {
868 cap = &capabilities[i];
869 evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
870 evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
871 #if defined(THREADED_RTS)
872 evac(user, (StgClosure **)(void *)&cap->wakeup_queue_hd);
873 evac(user, (StgClosure **)(void *)&cap->wakeup_queue_tl);
874 #endif
875 for (task = cap->suspended_ccalling_tasks; task != NULL;
876 task=task->next) {
877 evac(user, (StgClosure **)(void *)&task->suspended_tso);
878 }
879
880 #if defined(THREADED_RTS)
881 if (prune_sparks) {
882 pruneSparkQueue (evac, user, cap);
883 } else {
884 traverseSparkQueue (evac, user, cap);
885 }
886 #endif
887 }
888
889 #if !defined(THREADED_RTS)
890 evac(user, (StgClosure **)(void *)&blocked_queue_hd);
891 evac(user, (StgClosure **)(void *)&blocked_queue_tl);
892 evac(user, (StgClosure **)(void *)&sleeping_queue);
893 #endif
894 }
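// Illustrative only (an assumed caller, not part of this file): a GC thread
// with index t out of n GC threads would call
//
//     markSomeCapabilities(evac, user, t, n, prune_sparks);
//
// so that Capability c is marked by the thread for which t == c % n.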
895
896 void
897 markCapabilities (evac_fn evac, void *user)
898 {
899 markSomeCapabilities(evac, user, 0, 1, rtsFalse);
900 }