/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2003-2006
 *
 * Capabilities
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc.  During
 * STG execution, a pointer to the Capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities;
 * in non-threaded builds there is only one global capability, namely
 * MainCapability.
 *
 * --------------------------------------------------------------------------*/
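
/* Overview of the entry points defined below: initCapabilities() sets up
 * the Capability table at startup; waitForReturnCapability() and
 * yieldCapability() are how Tasks acquire and give up a Capability;
 * releaseCapability() and friends hand a free Capability to a waiting
 * Task; and shutdownCapability() drains a Capability at RTS shutdown. */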

#include "PosixSource.h"
#include "Rts.h"

#include "Capability.h"
#include "Schedule.h"
#include "Sparks.h"
#include "Trace.h"
#include "sm/GC.h" // for gcWorkerThread()
#include "STM.h"
#include "RtsUtils.h"

// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
Capability MainCapability;

nat n_capabilities = 0;
Capability *capabilities = NULL;

// Holds the Capability which last became free.  This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability = NULL;

/* GC indicator, in scope for the scheduler, init'ed to false */
volatile StgWord waiting_for_gc = 0;

/* Let foreign code get the current Capability -- assuming there is one!
 * This is useful for unsafe foreign calls because they are called with
 * the current Capability held, but they are not passed it.  For example,
 * see the integer-gmp package which calls allocateLocal() in its
 * stgAllocForGMP() function (which gets called by gmp functions).
 */
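/* A minimal usage sketch (illustrative only, not taken from this file):
 * an unsafe foreign allocation hook could fetch the current Capability
 * and allocate from its nursery, e.g.
 *
 *     Capability *cap = rts_unsafeGetMyCapability();
 *     StgPtr p = allocateLocal(cap, size_in_words);
 *
 * relying on the fact that an unsafe call runs with the Capability
 * still held by the calling OS thread. */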
Capability * rts_unsafeGetMyCapability (void)
{
#if defined(THREADED_RTS)
    return myTask()->cap;
#else
    return &MainCapability;
#endif
}

#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return sched_state >= SCHED_INTERRUPTING
        || recent_activity == ACTIVITY_INACTIVE; // need to check for deadlock
}
#endif

#if defined(THREADED_RTS)
StgClosure *
findSpark (Capability *cap)
{
    Capability *robbed;
    StgClosurePtr spark;
    rtsBool retry;
    nat i = 0;

    if (!emptyRunQueue(cap) || cap->returning_tasks_hd != NULL) {
        // If there are other threads, don't try to run any new
        // sparks: sparks might be speculative, we don't want to take
        // resources away from the main computation.
        return 0;
    }

    do {
        retry = rtsFalse;

        // first try to get a spark from our own pool.
        // We should be using reclaimSpark(), because it works without
        // needing any atomic instructions:
        //   spark = reclaimSpark(cap->sparks);
        // However, measurements show that this makes at least one benchmark
        // slower (prsa) and doesn't affect the others.
        spark = tryStealSpark(cap);
        if (spark != NULL) {
            cap->sparks_converted++;

            // Post event for running a spark from capability's own pool.
            traceEventRunSpark(cap, cap->r.rCurrentTSO);

            return spark;
        }
        if (!emptySparkPoolCap(cap)) {
            retry = rtsTrue;
        }

        if (n_capabilities == 1) { return NULL; } // makes no sense...

        debugTrace(DEBUG_sched,
                   "cap %d: Trying to steal work from other capabilities",
                   cap->no);

        /* visit Capabilities 0..n-1 in sequence until a theft succeeds.
           We could start at a random place instead of 0 as well. */
        for ( i=0 ; i < n_capabilities ; i++ ) {
            robbed = &capabilities[i];
            if (cap == robbed)  // ourselves...
                continue;

            if (emptySparkPoolCap(robbed)) // nothing to steal here
                continue;

            spark = tryStealSpark(robbed);
            if (spark == NULL && !emptySparkPoolCap(robbed)) {
                // we conflicted with another thread while trying to steal;
                // try again later.
                retry = rtsTrue;
            }

            if (spark != NULL) {
                cap->sparks_converted++;

                traceEventStealSpark(cap, cap->r.rCurrentTSO, robbed->no);

                return spark;
            }
            // otherwise: no success, try next one
        }
    } while (retry);

    debugTrace(DEBUG_sched, "No sparks stolen");
    return NULL;
}

// Returns rtsTrue if any spark pool is non-empty at this moment in time.
// The result is only valid for an instant, of course, so in a sense
// is immediately invalid, and should not be relied upon for
// correctness.
rtsBool
anySparks (void)
{
    nat i;

    for (i=0; i < n_capabilities; i++) {
        if (!emptySparkPoolCap(&capabilities[i])) {
            return rtsTrue;
        }
    }
    return rtsFalse;
}
#endif

/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock
 * -------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->next == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->next == NULL);
        cap->returning_tasks_tl->next = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}

STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    ASSERT_LOCK_HELD(&cap->lock);
    Task *task;
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->next;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->next = NULL;
    return task;
}
#endif

/* ----------------------------------------------------------------------------
 * Initialisation
 *
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */

static void
initCapability( Capability *cap, nat i )
{
    nat g;

    cap->no = i;
    cap->in_haskell = rtsFalse;

    cap->run_queue_hd = END_TSO_QUEUE;
    cap->run_queue_tl = END_TSO_QUEUE;

#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task       = NULL; // indicates cap is free
    cap->spare_workers      = NULL;
    cap->n_spare_workers    = 0;
    cap->suspended_ccalls   = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->inbox              = (Message*)END_TSO_QUEUE;
    cap->sparks_created     = 0;
    cap->sparks_dud         = 0;
    cap->sparks_converted   = 0;
    cap->sparks_gcd         = 0;
    cap->sparks_fizzled     = 0;
#endif

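    // Cache a few heavily-used code/info-table addresses in the
    // Capability's function table, so that compiled code can reach
    // them via BaseReg (cap->r).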
    cap->f.stgEagerBlackholeInfo = (W_)&__stg_EAGER_BLACKHOLE_info;
    cap->f.stgGCEnter1 = (StgFunPtr)__stg_gc_enter_1;
    cap->f.stgGCFun    = (StgFunPtr)__stg_gc_fun;

    cap->mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                    RtsFlags.GcFlags.generations,
                                    "initCapability");
    cap->saved_mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                          RtsFlags.GcFlags.generations,
                                          "initCapability");

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }

    cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks = END_STM_CHUNK_LIST;
    cap->free_trec_headers = NO_TREC;
    cap->transaction_tokens = 0;
    cap->context_switch = 0;
    cap->pinned_object_block = NULL;

    traceCapsetAssignCap(CAPSET_OSPROCESS_DEFAULT, i);
}

/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
void
initCapabilities( void )
{

#if defined(THREADED_RTS)
    nat i;

#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
        errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
        RtsFlags.ParFlags.nNodes = 1;
    }
#endif

    n_capabilities = RtsFlags.ParFlags.nNodes;

    if (n_capabilities == 1) {
        capabilities = &MainCapability;
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
    } else {
        capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
                                      "initCapabilities");
    }

    for (i = 0; i < n_capabilities; i++) {
        initCapability(&capabilities[i], i);
    }

    debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);

#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif

    // There are no free capabilities to begin with.  We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}

/* ----------------------------------------------------------------------------
 * setContextSwitches: cause all capabilities to context switch as
 * soon as possible.
 * ------------------------------------------------------------------------- */

void setContextSwitches(void)
{
    nat i;
    for (i=0; i < n_capabilities; i++) {
        contextSwitchCapability(&capabilities[i]);
    }
}

/* ----------------------------------------------------------------------------
 * Give a Capability to a Task.  The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    debugTrace(DEBUG_sched, "passing capability %d to %s %p",
               cap->no, task->incall->tso ? "bound task" : "worker",
               (void *)task->id);
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because signalCondition() doesn't
    // flag the condition if the thread is already running, but we want
    // it to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
#endif


/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability. Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap,
                    rtsBool always_wakeup)
{
    Task *task;

    task = cap->running_task;

    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call..
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }

    if (waiting_for_gc == PENDING_GC_SEQ) {
        last_free_capability = cap; // needed?
        debugTrace(DEBUG_sched, "GC pending, set capability %d free", cap->no);
        return;
    }

    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        // ASSERT(task != cap->run_queue_hd->bound);
        // assertion is false: in schedule() we force a yield after
        // ThreadBlocked, but the thread may be back on the run queue
        // by now.
        task = cap->run_queue_hd->bound->task;
        giveCapabilityToTask(cap,task);
        return;
    }

    if (!cap->spare_workers) {
        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
            debugTrace(DEBUG_sched,
                       "starting new worker on capability %d", cap->no);
            startWorkerTask(cap);
            return;
        }
    }

    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (always_wakeup ||
        !emptyRunQueue(cap) || !emptyInbox(cap) ||
        !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue;
            return;
        }
    }

    last_free_capability = cap;
    debugTrace(DEBUG_sched, "freeing capability %d", cap->no);
}

void
releaseCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsFalse);
    RELEASE_LOCK(&cap->lock);
}

void
releaseAndWakeupCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsTrue);
    RELEASE_LOCK(&cap->lock);
}

static void
releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the Task is stopped, we shouldn't be yielding; we should
    // just be exiting.
    ASSERT(!task->stopped);

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability.  A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_workers list.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    if (!isBoundTask(task))
    {
        if (cap->n_spare_workers < MAX_SPARE_WORKERS)
        {
            task->next = cap->spare_workers;
            cap->spare_workers = task;
            cap->n_spare_workers++;
        }
        else
        {
            debugTrace(DEBUG_sched, "%d spare workers already, exiting",
                       cap->n_spare_workers);
            releaseCapability_(cap,rtsFalse);
            // hold the lock until after workerTaskStop; c.f. scheduleWorker()
            workerTaskStop(task);
            RELEASE_LOCK(&cap->lock);
            shutdownThread();
        }
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap,rtsFalse);

    RELEASE_LOCK(&cap->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * waitForReturnCapability (Capability **pCap, Task *task)
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
void
waitForReturnCapability (Capability **pCap, Task *task)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            cap = NULL;
            for (i = 0; i < n_capabilities; i++) {
                if (!capabilities[i].running_task) {
                    cap = &capabilities[i];
                    break;
                }
            }
            if (cap == NULL) {
                // Can't find a free one, use last_free_capability.
                cap = last_free_capability;
            }
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }

    ACQUIRE_LOCK(&cap->lock);

    debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);

    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
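                // The Capability is free, but only the Task at the head
                // of the returning queue may claim it; if that isn't us,
                // hand it to the head and keep waiting.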
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }

    }

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    debugTrace(DEBUG_sched, "resuming capability %d", cap->no);

    *pCap = cap;
#endif
}

#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */

void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

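    // If a parallel GC is pending, help out with the GC and then return
    // to the scheduler, keeping hold of this Capability.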
    if (waiting_for_gc == PENDING_GC_PAR) {
        traceEventGcStart(cap);
        gcWorkerThread(cap);
        traceEventGcEnd(cap);
        return;
    }

    debugTrace(DEBUG_sched, "giving up capability %d", cap->no);

    // We must now release the capability and wait to be woken up
    // again.
    task->wakeup = rtsFalse;
    releaseCapabilityAndQueueWorker(cap);

    for (;;) {
        ACQUIRE_LOCK(&task->lock);
        // task->lock held, cap->lock not held
        if (!task->wakeup) waitCondition(&task->cond, &task->lock);
        cap = task->cap;
        task->wakeup = rtsFalse;
        RELEASE_LOCK(&task->lock);

        debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);

        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task != NULL) {
            debugTrace(DEBUG_sched,
                       "capability %d is owned by another task", cap->no);
            RELEASE_LOCK(&cap->lock);
            continue;
        }

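        // Worker Tasks (those with no bound TSO) parked themselves on
        // cap->spare_workers in releaseCapabilityAndQueueWorker(); they
        // must remove themselves from that list before running again.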
        if (task->incall->tso == NULL) {
            ASSERT(cap->spare_workers != NULL);
            // if we're not at the front of the queue, release it
            // again.  This is unlikely to happen.
            if (cap->spare_workers != task) {
                giveCapabilityToTask(cap,cap->spare_workers);
                RELEASE_LOCK(&cap->lock);
                continue;
            }
            cap->spare_workers = task->next;
            task->next = NULL;
            cap->n_spare_workers--;
        }
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
        break;
    }

    debugTrace(DEBUG_sched, "resuming capability %d", cap->no);
    ASSERT(cap->running_task == task);

    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    return;
}

/* ----------------------------------------------------------------------------
 * prodCapability
 *
 * If a Capability is currently idle, wake up a Task on it.  Used to
 * get every Capability into the GC.
 * ------------------------------------------------------------------------- */

void
prodCapability (Capability *cap, Task *task)
{
    ACQUIRE_LOCK(&cap->lock);
    if (!cap->running_task) {
        cap->running_task = task;
        releaseCapability_(cap,rtsTrue);
    }
    RELEASE_LOCK(&cap->lock);
}

/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible.  For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, thus any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller.  Runnable threads are
 * killed.
 *
 * ------------------------------------------------------------------------- */

void
shutdownCapability (Capability *cap, Task *task, rtsBool safe)
{
    nat i;

    task->cap = cap;

    // Loop indefinitely until all the workers have exited and there
    // are no Haskell threads left.  We used to bail out after 50
    // iterations of this loop, but that occasionally left a worker
    // running which caused problems later (the closeMutex() below
    // isn't safe, for one thing).

    for (i = 0; /* i < 50 */; i++) {
        ASSERT(sched_state == SCHED_SHUTTING_DOWN);

        debugTrace(DEBUG_sched,
                   "shutting down capability %d, attempt %d", cap->no, i);
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            debugTrace(DEBUG_sched, "not owner, yielding");
            yieldThread();
            continue;
        }
        cap->running_task = task;

        if (cap->spare_workers) {
            // Look for workers that have died without removing
            // themselves from the list; this could happen if the OS
            // summarily killed the thread, for example.  This
            // actually happens on Windows when the system is
            // terminating the program, and the RTS is running in a
            // DLL.
            Task *t, *prev;
            prev = NULL;
            for (t = cap->spare_workers; t != NULL; t = t->next) {
                if (!osThreadIsAlive(t->id)) {
                    debugTrace(DEBUG_sched,
                               "worker thread %p has died unexpectedly", (void *)t->id);
                    cap->n_spare_workers--;
                    if (!prev) {
                        cap->spare_workers = t->next;
                    } else {
                        prev->next = t->next;
                    }
                } else {
                    // only advance prev past nodes that we keep; otherwise
                    // a run of consecutive dead workers would not all be
                    // unlinked from the list.
                    prev = t;
                }
            }
        }

        if (!emptyRunQueue(cap) || cap->spare_workers) {
            debugTrace(DEBUG_sched,
                       "runnable threads or workers still alive, yielding");
            releaseCapability_(cap,rtsFalse); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        // If "safe", then busy-wait for any threads currently doing
        // foreign calls.  If we're about to unload this DLL, for
        // example, we need to be sure that there are no OS threads
        // that will try to return to code that has been unloaded.
        // We can be a bit more relaxed when this is a standalone
        // program that is about to terminate, and let safe=false.
        if (cap->suspended_ccalls && safe) {
            debugTrace(DEBUG_sched,
                       "thread(s) are involved in foreign calls, yielding");
            cap->running_task = NULL;
            RELEASE_LOCK(&cap->lock);
            // The IO manager thread might have been slow to start up,
            // so the first attempt to kill it might not have
            // succeeded.  Just in case, try again - the kill message
            // will only be sent once.
            //
            // To reproduce this deadlock: run ffi002(threaded1)
            // repeatedly on a loaded machine.
            ioManagerDie();
            yieldThread();
            continue;
        }

        traceEventShutdown(cap);
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // We now have the Capability; its run queue and spare workers
    // list are both empty.

    // ToDo: we can't drop this mutex, because there might still be
    // threads performing foreign calls that will eventually try to
    // return via resumeThread() and attempt to grab cap->lock.
    // closeMutex(&cap->lock);
}

/* ----------------------------------------------------------------------------
 * tryGrabCapability
 *
 * Attempt to gain control of a Capability if it is free.
 *
 * ------------------------------------------------------------------------- */

rtsBool
tryGrabCapability (Capability *cap, Task *task)
{
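    // Fast path: peek at running_task without taking the lock and fail
    // immediately if the Capability looks busy; re-check under
    // cap->lock before actually claiming it.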
    if (cap->running_task != NULL) return rtsFalse;
    ACQUIRE_LOCK(&cap->lock);
    if (cap->running_task != NULL) {
        RELEASE_LOCK(&cap->lock);
        return rtsFalse;
    }
    task->cap = cap;
    cap->running_task = task;
    RELEASE_LOCK(&cap->lock);
    return rtsTrue;
}

#endif /* THREADED_RTS */

static void
freeCapability (Capability *cap)
{
    stgFree(cap->mut_lists);
    stgFree(cap->saved_mut_lists);
#if defined(THREADED_RTS)
    freeSparkPool(cap->sparks);
#endif
}

void
freeCapabilities (void)
{
#if defined(THREADED_RTS)
    nat i;
    for (i=0; i < n_capabilities; i++) {
        freeCapability(&capabilities[i]);
    }
#else
    freeCapability(&MainCapability);
#endif
    traceCapsetDelete(CAPSET_OSPROCESS_DEFAULT);
}

/* ---------------------------------------------------------------------------
   Mark everything directly reachable from the Capabilities.  When
   using multiple GC threads, each GC thread marks the Capabilities
   whose number is congruent to its own index modulo the number of
   GC threads.
   ------------------------------------------------------------------------ */

void
markCapability (evac_fn evac, void *user, Capability *cap,
                rtsBool no_mark_sparks USED_IF_THREADS)
{
    InCall *incall;

    // Each GC thread is responsible for following roots from the
    // Capability of the same number.  There will usually be the same
    // or fewer Capabilities as GC threads, but just in case there
    // are more, we mark every Capability whose number is the GC
    // thread's index plus a multiple of the number of GC threads.
    evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
    evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
    evac(user, (StgClosure **)(void *)&cap->inbox);
#endif
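    // TSOs suspended in foreign calls are not on any run queue, so they
    // must be treated as roots here too.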
    for (incall = cap->suspended_ccalls; incall != NULL;
         incall=incall->next) {
        evac(user, (StgClosure **)(void *)&incall->suspended_tso);
    }

#if defined(THREADED_RTS)
    if (!no_mark_sparks) {
        traverseSparkQueue (evac, user, cap);
    }
#endif

    // Free STM structures for this Capability
    stmPreGCHook(cap);
}

void
markCapabilities (evac_fn evac, void *user)
{
    nat n;
    for (n = 0; n < n_capabilities; n++) {
        markCapability(evac, user, &capabilities[n], rtsFalse);
    }
}