/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2003-2006
 *
 * Capabilities
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc. During
 * STG execution, a pointer to the Capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities;
 * in non-threaded builds there is only one global capability, namely
 * MainCapability.
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Capability.h"
#include "Schedule.h"
#include "Sparks.h"
#include "Trace.h"
#include "sm/GC.h" // for gcWorkerThread()
#include "STM.h"
#include "RtsUtils.h"

#include <string.h>

// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
Capability MainCapability;

nat n_capabilities = 0;
nat enabled_capabilities = 0;
Capability *capabilities = NULL;

// Holds the Capability which last became free.  This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability = NULL;

/*
 * Indicates that the RTS wants to synchronise all the Capabilities
 * for some reason.  All Capabilities should stop and return to the
 * scheduler.
 */
volatile StgWord pending_sync = 0;

/* Let foreign code get the current Capability -- assuming there is one!
 * This is useful for unsafe foreign calls because they are called with
 * the current Capability held, but they are not passed it. For example,
 * see the integer-gmp package, which calls allocate() in its
 * stgAllocForGMP() function (which gets called by gmp functions).
 */
Capability * rts_unsafeGetMyCapability (void)
{
#if defined(THREADED_RTS)
  return myTask()->cap;
#else
  return &MainCapability;
#endif
}
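
/* An illustrative use from foreign code (a sketch only; the function
 * name below is hypothetical, and allocate() is the RTS allocator
 * mentioned above):
 *
 *     static void * exampleAllocForGMP (size_t size_in_bytes)
 *     {
 *         Capability *cap = rts_unsafeGetMyCapability();
 *         // allocate from the current Capability's nursery
 *         return allocate(cap, ROUNDUP_BYTES_TO_WDS(size_in_bytes));
 *     }
 */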

#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return sched_state >= SCHED_INTERRUPTING
        || recent_activity == ACTIVITY_INACTIVE; // need to check for deadlock
}
#endif

#if defined(THREADED_RTS)
StgClosure *
findSpark (Capability *cap)
{
    Capability *robbed;
    StgClosurePtr spark;
    rtsBool retry;
    nat i = 0;

    if (!emptyRunQueue(cap) || cap->returning_tasks_hd != NULL) {
        // If there are other threads, don't try to run any new
        // sparks: sparks might be speculative, we don't want to take
        // resources away from the main computation.
        return 0;
    }

    do {
        retry = rtsFalse;

        // first try to get a spark from our own pool.
        // We should be using reclaimSpark(), because it works without
        // needing any atomic instructions:
        //   spark = reclaimSpark(cap->sparks);
        // However, measurements show that this makes at least one benchmark
        // slower (prsa) and doesn't affect the others.
        spark = tryStealSpark(cap->sparks);
        while (spark != NULL && fizzledSpark(spark)) {
            cap->spark_stats.fizzled++;
            traceEventSparkFizzle(cap);
            spark = tryStealSpark(cap->sparks);
        }
        if (spark != NULL) {
            cap->spark_stats.converted++;

            // Post event for running a spark from capability's own pool.
            traceEventSparkRun(cap);

            return spark;
        }
        if (!emptySparkPoolCap(cap)) {
            retry = rtsTrue;
        }

        if (n_capabilities == 1) { return NULL; } // makes no sense...

        debugTrace(DEBUG_sched,
                   "cap %d: Trying to steal work from other capabilities",
                   cap->no);

        /* visit capabilities 0..n-1 in sequence until a theft succeeds.
           We could start at a random place instead of 0 as well. */
        for ( i=0 ; i < n_capabilities ; i++ ) {
            robbed = &capabilities[i];
            if (cap == robbed)  // ourselves...
                continue;

            if (emptySparkPoolCap(robbed)) // nothing to steal here
                continue;

            spark = tryStealSpark(robbed->sparks);
            while (spark != NULL && fizzledSpark(spark)) {
                cap->spark_stats.fizzled++;
                traceEventSparkFizzle(cap);
                spark = tryStealSpark(robbed->sparks);
            }
            if (spark == NULL && !emptySparkPoolCap(robbed)) {
                // we conflicted with another thread while trying to steal;
                // try again later.
                retry = rtsTrue;
            }

            if (spark != NULL) {
                cap->spark_stats.converted++;
                traceEventSparkSteal(cap, robbed->no);

                return spark;
            }
            // otherwise: no success, try next one
        }
    } while (retry);

    debugTrace(DEBUG_sched, "No sparks stolen");
    return NULL;
}
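
/* Illustrative consumer (a sketch only; the real call sites live in
 * the scheduler, see Schedule.c):
 *
 *     StgClosure *spark = findSpark(cap);
 *     if (spark != NULL) {
 *         // evaluate the spark to WHNF; fizzled sparks (already
 *         // evaluated, or under evaluation elsewhere) have been
 *         // filtered out by findSpark() itself.
 *     }
 */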

// Returns rtsTrue if any spark pool is non-empty at this moment in time.
// The result is only valid for an instant, of course, so in a sense
// is immediately invalid, and should not be relied upon for
// correctness.
rtsBool
anySparks (void)
{
    nat i;

    for (i=0; i < n_capabilities; i++) {
        if (!emptySparkPoolCap(&capabilities[i])) {
            return rtsTrue;
        }
    }
    return rtsFalse;
}
#endif

/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock
 * -------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->next == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->next == NULL);
        cap->returning_tasks_tl->next = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}

STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    ASSERT_LOCK_HELD(&cap->lock);
    Task *task;
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->next;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->next = NULL;
    return task;
}
#endif

/* ----------------------------------------------------------------------------
 * Initialisation
 *
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */

static void
initCapability( Capability *cap, nat i )
{
    nat g;

    cap->no = i;
    cap->in_haskell = rtsFalse;
    cap->idle = 0;
    cap->disabled = rtsFalse;

    cap->run_queue_hd = END_TSO_QUEUE;
    cap->run_queue_tl = END_TSO_QUEUE;

#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task = NULL; // indicates cap is free
    cap->spare_workers = NULL;
    cap->n_spare_workers = 0;
    cap->suspended_ccalls = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->inbox = (Message*)END_TSO_QUEUE;
    cap->sparks = allocSparkPool();
    cap->spark_stats.created = 0;
    cap->spark_stats.dud = 0;
    cap->spark_stats.overflowed = 0;
    cap->spark_stats.converted = 0;
    cap->spark_stats.gcd = 0;
    cap->spark_stats.fizzled = 0;
#endif

    cap->f.stgEagerBlackholeInfo = (W_)&__stg_EAGER_BLACKHOLE_info;
    cap->f.stgGCEnter1 = (StgFunPtr)__stg_gc_enter_1;
    cap->f.stgGCFun = (StgFunPtr)__stg_gc_fun;

    cap->mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                    RtsFlags.GcFlags.generations,
                                    "initCapability");
    cap->saved_mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                          RtsFlags.GcFlags.generations,
                                          "initCapability");

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }

    cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks = END_STM_CHUNK_LIST;
    cap->free_trec_headers = NO_TREC;
    cap->transaction_tokens = 0;
    cap->context_switch = 0;
    cap->pinned_object_block = NULL;
    cap->pinned_object_blocks = NULL;

#ifdef PROFILING
    cap->r.rCCCS = CCS_SYSTEM;
#else
    cap->r.rCCCS = NULL;
#endif

    traceCapsetAssignCap(CAPSET_OSPROCESS_DEFAULT, i);
    traceCapsetAssignCap(CAPSET_CLOCKDOMAIN_DEFAULT, i);
#if defined(THREADED_RTS)
    traceSparkCounters(cap);
#endif
}

/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
void
initCapabilities( void )
{
    /* Declare a couple of capability sets representing the process and
       clock domain. Each capability will get added to these capsets. */
    traceCapsetCreate(CAPSET_OSPROCESS_DEFAULT, CapsetTypeOsProcess);
    traceCapsetCreate(CAPSET_CLOCKDOMAIN_DEFAULT, CapsetTypeClockdomain);

#if defined(THREADED_RTS)

#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
        errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
        RtsFlags.ParFlags.nNodes = 1;
    }
#endif

    n_capabilities = 0;
    moreCapabilities(0, RtsFlags.ParFlags.nNodes);
    n_capabilities = RtsFlags.ParFlags.nNodes;

#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif

    enabled_capabilities = n_capabilities;

    // There are no free capabilities to begin with.  We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}

Capability *
moreCapabilities (nat from USED_IF_THREADS, nat to USED_IF_THREADS)
{
#if defined(THREADED_RTS)
    nat i;
    Capability *old_capabilities = capabilities;

    if (to == 1) {
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
        capabilities = &MainCapability;
    } else {
        capabilities = stgMallocBytes(to * sizeof(Capability),
                                      "moreCapabilities");

        if (from > 0) {
            memcpy(capabilities, old_capabilities, from * sizeof(Capability));
        }
    }

    for (i = from; i < to; i++) {
        initCapability(&capabilities[i], i);
    }

    last_free_capability = &capabilities[0];

    debugTrace(DEBUG_sched, "allocated %d more capabilities", to - from);

    // Return the old array to free later; when from <= 1 there is no
    // heap-allocated old array (for from == 1 it was &MainCapability).
    if (from > 1) {
        return old_capabilities;
    } else {
        return NULL;
    }
#else
    return NULL;
#endif
}
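
/* Illustrative caller (a sketch only; the real logic lives in
 * setNumCapabilities()):
 *
 *     Capability *old = moreCapabilities(n_capabilities, new_n);
 *     n_capabilities = new_n;
 *     // ...later, once nothing can still hold a pointer into the
 *     // old array, it is safe to stgFree(old) if it is non-NULL.
 */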

/* ----------------------------------------------------------------------------
 * contextSwitchAllCapabilities: cause all capabilities to context
 * switch as soon as possible.
 * ------------------------------------------------------------------------- */

void contextSwitchAllCapabilities(void)
{
    nat i;
    for (i=0; i < n_capabilities; i++) {
        contextSwitchCapability(&capabilities[i]);
    }
}

void interruptAllCapabilities(void)
{
    nat i;
    for (i=0; i < n_capabilities; i++) {
        interruptCapability(&capabilities[i]);
    }
}

/* ----------------------------------------------------------------------------
 * Give a Capability to a Task.  The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    debugTrace(DEBUG_sched, "passing capability %d to %s %p",
               cap->no, task->incall->tso ? "bound task" : "worker",
               (void *)task->id);
    ACQUIRE_LOCK(&task->lock);
    if (task->wakeup == rtsFalse) {
        task->wakeup = rtsTrue;
        // the wakeup flag is needed because signalCondition() doesn't
        // flag the condition if the thread is already running, but we want
        // it to be sticky.
        signalCondition(&task->cond);
    }
    RELEASE_LOCK(&task->lock);
}
#endif
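
/* The matching wait-side pattern (used verbatim in
 * waitForReturnCapability() and yieldCapability() below) is:
 *
 *     ACQUIRE_LOCK(&task->lock);
 *     if (!task->wakeup) waitCondition(&task->cond, &task->lock);
 *     cap = task->cap;
 *     task->wakeup = rtsFalse;
 *     RELEASE_LOCK(&task->lock);
 *
 * The sticky wakeup flag ensures a signal sent while the task is not
 * yet blocked on the condition variable is not lost.
 */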

/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability. Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap,
                    rtsBool always_wakeup)
{
    Task *task;

    task = cap->running_task;

    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call.
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }

    // If there is a pending sync, then we should just leave the
    // Capability free.  The thread trying to sync will be about to
    // call waitForReturnCapability().
    if (pending_sync != 0 && pending_sync != SYNC_GC_PAR) {
        last_free_capability = cap; // needed?
        debugTrace(DEBUG_sched, "sync pending, set capability %d free", cap->no);
        return;
    }

    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        // ASSERT(task != cap->run_queue_hd->bound);
        // assertion is false: in schedule() we force a yield after
        // ThreadBlocked, but the thread may be back on the run queue
        // by now.
        task = cap->run_queue_hd->bound->task;
        giveCapabilityToTask(cap,task);
        return;
    }

    if (!cap->spare_workers) {
        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
            debugTrace(DEBUG_sched,
                       "starting new worker on capability %d", cap->no);
            startWorkerTask(cap);
            return;
        }
    }

    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (always_wakeup ||
        !emptyRunQueue(cap) || !emptyInbox(cap) ||
        (!cap->disabled && !emptySparkPoolCap(cap)) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue.
            return;
        }
    }

#ifdef PROFILING
    cap->r.rCCCS = CCS_IDLE;
#endif
    last_free_capability = cap;
    debugTrace(DEBUG_sched, "freeing capability %d", cap->no);
}

void
releaseCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsFalse);
    RELEASE_LOCK(&cap->lock);
}

void
releaseAndWakeupCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsTrue);
    RELEASE_LOCK(&cap->lock);
}
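
/* Typical shape of a call site (illustrative only):
 *
 *     // we own cap (cap->running_task == task) and are done with it
 *     releaseCapability(cap);
 *
 * Wakeup priority inside releaseCapability_() is: returning workers
 * first, then a bound thread at the head of the run queue, then a
 * spare worker (creating one if necessary).
 */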

static void
releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the Task is stopped, we shouldn't be yielding, we should
    // be just exiting.
    ASSERT(!task->stopped);

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability.  A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_worker queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    if (!isBoundTask(task))
    {
        if (cap->n_spare_workers < MAX_SPARE_WORKERS)
        {
            task->next = cap->spare_workers;
            cap->spare_workers = task;
            cap->n_spare_workers++;
        }
        else
        {
            debugTrace(DEBUG_sched, "%d spare workers already, exiting",
                       cap->n_spare_workers);
            releaseCapability_(cap,rtsFalse);
            // hold the lock until after workerTaskStop; c.f. scheduleWorker()
            workerTaskStop(task);
            RELEASE_LOCK(&cap->lock);
            shutdownThread();
        }
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap,rtsFalse);

    RELEASE_LOCK(&cap->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * waitForReturnCapability (Capability **pCap, Task *task)
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
void
waitForReturnCapability (Capability **pCap, Task *task)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            cap = NULL;
            for (i = 0; i < n_capabilities; i++) {
                if (!capabilities[i].running_task) {
                    cap = &capabilities[i];
                    break;
                }
            }
            if (cap == NULL) {
                // Can't find a free one, use last_free_capability.
                cap = last_free_capability;
            }
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }

    ACQUIRE_LOCK(&cap->lock);

    debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);

    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }

    }

#ifdef PROFILING
    cap->r.rCCCS = CCS_SYSTEM;
#endif

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    debugTrace(DEBUG_sched, "resuming capability %d", cap->no);

    *pCap = cap;
#endif
}

#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */

void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

    if (pending_sync == SYNC_GC_PAR) {
        traceEventGcStart(cap);
        gcWorkerThread(cap);
        traceEventGcEnd(cap);
        traceSparkCounters(cap);
        // See Note [migrated bound threads 2]
        if (task->cap == cap) return;
    }

    debugTrace(DEBUG_sched, "giving up capability %d", cap->no);

    // We must now release the capability and wait to be woken up
    // again.
    task->wakeup = rtsFalse;
    releaseCapabilityAndQueueWorker(cap);

    for (;;) {
        ACQUIRE_LOCK(&task->lock);
        // task->lock held, cap->lock not held
        if (!task->wakeup) waitCondition(&task->cond, &task->lock);
        cap = task->cap;
        task->wakeup = rtsFalse;
        RELEASE_LOCK(&task->lock);

        debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);

        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task != NULL) {
            debugTrace(DEBUG_sched,
                       "capability %d is owned by another task", cap->no);
            RELEASE_LOCK(&cap->lock);
            continue;
        }

        if (task->cap != cap) {
            // see Note [migrated bound threads]
            debugTrace(DEBUG_sched,
                       "task has been migrated to cap %d", task->cap->no);
            RELEASE_LOCK(&cap->lock);
            continue;
        }

        if (task->incall->tso == NULL) {
            ASSERT(cap->spare_workers != NULL);
            // if we're not at the front of the queue, release it
            // again.  This is unlikely to happen.
            if (cap->spare_workers != task) {
                giveCapabilityToTask(cap,cap->spare_workers);
                RELEASE_LOCK(&cap->lock);
                continue;
            }
            cap->spare_workers = task->next;
            task->next = NULL;
            cap->n_spare_workers--;
        }

        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
        break;
    }

    debugTrace(DEBUG_sched, "resuming capability %d", cap->no);
    ASSERT(cap->running_task == task);

#ifdef PROFILING
    cap->r.rCCCS = CCS_SYSTEM;
#endif

    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    return;
}

// Note [migrated bound threads]
//
// There's a tricky case where:
//  - cap A is running an unbound thread T1
//  - there is a bound thread T2 at the head of the run queue on cap A
//  - T1 makes a safe foreign call, the task bound to T2 is woken up on cap A
//  - T1 returns quickly, grabbing A again (T2 is still waking up on A)
//  - T1 blocks, the scheduler migrates T2 to cap B
//  - the task bound to T2 wakes up on cap B
//
// We take advantage of the following invariant:
//
//  - A bound thread can only be migrated by the holder of the
//    Capability on which the bound thread currently lives.  So, if we
//    hold Capability C, and task->cap == C, then task cannot be
//    migrated under our feet.

// Note [migrated bound threads 2]
//
// Second tricky case:
//  - A bound Task becomes a GC thread
//  - scheduleDoGC() migrates the thread belonging to this Task,
//    because the Capability it is on is disabled
//  - after GC, gcWorkerThread() returns, but now we are
//    holding a Capability that is not the same as task->cap
//  - Hence we must check for this case and immediately give up the
//    cap we hold.

/* ----------------------------------------------------------------------------
 * prodCapability
 *
 * If a Capability is currently idle, wake up a Task on it.  Used to
 * get every Capability into the GC.
 * ------------------------------------------------------------------------- */

void
prodCapability (Capability *cap, Task *task)
{
    ACQUIRE_LOCK(&cap->lock);
    if (!cap->running_task) {
        cap->running_task = task;
        releaseCapability_(cap,rtsTrue);
    }
    RELEASE_LOCK(&cap->lock);
}

/* ----------------------------------------------------------------------------
 * tryGrabCapability
 *
 * Attempt to gain control of a Capability if it is free.
 *
 * ------------------------------------------------------------------------- */

rtsBool
tryGrabCapability (Capability *cap, Task *task)
{
    // unlocked fast path: if someone owns the Capability, fail
    // immediately; a stale read is harmless because we re-check
    // under cap->lock below.
    if (cap->running_task != NULL) return rtsFalse;
    ACQUIRE_LOCK(&cap->lock);
    if (cap->running_task != NULL) {
        RELEASE_LOCK(&cap->lock);
        return rtsFalse;
    }
    task->cap = cap;
    cap->running_task = task;
    RELEASE_LOCK(&cap->lock);
    return rtsTrue;
}

#endif /* THREADED_RTS */
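
/* Illustrative use (a sketch only; callers in the scheduler follow
 * this pattern when trying to pick up idle Capabilities):
 *
 *     if (tryGrabCapability(&capabilities[i], task)) {
 *         // we now own capabilities[i] and must eventually release it
 *     } else {
 *         // it was busy; try another one or retry later
 *     }
 */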

/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible.  For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, thus any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller.  Runnable threads are
 * killed.
 *
 * ------------------------------------------------------------------------- */

void
shutdownCapability (Capability *cap,
                    Task *task USED_IF_THREADS,
                    rtsBool safe USED_IF_THREADS)
{
#if defined(THREADED_RTS)
    nat i;

    task->cap = cap;

    // Loop indefinitely until all the workers have exited and there
    // are no Haskell threads left.  We used to bail out after 50
    // iterations of this loop, but that occasionally left a worker
    // running which caused problems later (the closeMutex() below
    // isn't safe, for one thing).

    for (i = 0; /* i < 50 */; i++) {
        ASSERT(sched_state == SCHED_SHUTTING_DOWN);

        debugTrace(DEBUG_sched,
                   "shutting down capability %d, attempt %d", cap->no, i);
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            debugTrace(DEBUG_sched, "not owner, yielding");
            yieldThread();
            continue;
        }
        cap->running_task = task;

        if (cap->spare_workers) {
            // Look for workers that have died without removing
            // themselves from the list; this could happen if the OS
            // summarily killed the thread, for example.  This
            // actually happens on Windows when the system is
            // terminating the program, and the RTS is running in a
            // DLL.
            Task *t, *prev;
            prev = NULL;
            for (t = cap->spare_workers; t != NULL; t = t->next) {
                if (!osThreadIsAlive(t->id)) {
                    debugTrace(DEBUG_sched,
                               "worker thread %p has died unexpectedly", (void *)t->id);
                    cap->n_spare_workers--;
                    if (!prev) {
                        cap->spare_workers = t->next;
                    } else {
                        prev->next = t->next;
                    }
                } else {
                    // only advance prev past workers we keep; a dead
                    // worker has just been unlinked, so prev stays put.
                    prev = t;
                }
            }
        }

        if (!emptyRunQueue(cap) || cap->spare_workers) {
            debugTrace(DEBUG_sched,
                       "runnable threads or workers still alive, yielding");
            releaseCapability_(cap,rtsFalse); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        // If "safe", then busy-wait for any threads currently doing
        // foreign calls.  If we're about to unload this DLL, for
        // example, we need to be sure that there are no OS threads
        // that will try to return to code that has been unloaded.
        // We can be a bit more relaxed when this is a standalone
        // program that is about to terminate, and let safe=false.
        if (cap->suspended_ccalls && safe) {
            debugTrace(DEBUG_sched,
                       "thread(s) are involved in foreign calls, yielding");
            cap->running_task = NULL;
            RELEASE_LOCK(&cap->lock);
            // The IO manager thread might have been slow to start up,
            // so the first attempt to kill it might not have
            // succeeded.  Just in case, try again - the kill message
            // will only be sent once.
            //
            // To reproduce this deadlock: run ffi002(threaded1)
            // repeatedly on a loaded machine.
            ioManagerDie();
            yieldThread();
            continue;
        }

        traceEventShutdown(cap);
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // We now have the Capability; its run queue and spare workers
    // list are both empty.

    // ToDo: we can't drop this mutex, because there might still be
    // threads performing foreign calls that will eventually try to
    // return via resumeThread() and attempt to grab cap->lock.
    // closeMutex(&cap->lock);

    traceSparkCounters(cap);

#endif /* THREADED_RTS */

    traceCapsetRemoveCap(CAPSET_OSPROCESS_DEFAULT, cap->no);
    traceCapsetRemoveCap(CAPSET_CLOCKDOMAIN_DEFAULT, cap->no);
}

void
shutdownCapabilities(Task *task, rtsBool safe)
{
    nat i;
    for (i=0; i < n_capabilities; i++) {
        ASSERT(task->incall->tso == NULL);
        shutdownCapability(&capabilities[i], task, safe);
    }
    traceCapsetDelete(CAPSET_OSPROCESS_DEFAULT);
    traceCapsetDelete(CAPSET_CLOCKDOMAIN_DEFAULT);

#if defined(THREADED_RTS)
    ASSERT(checkSparkCountInvariant());
#endif
}

static void
freeCapability (Capability *cap)
{
    stgFree(cap->mut_lists);
    stgFree(cap->saved_mut_lists);
#if defined(THREADED_RTS)
    freeSparkPool(cap->sparks);
#endif
}

void
freeCapabilities (void)
{
#if defined(THREADED_RTS)
    nat i;
    for (i=0; i < n_capabilities; i++) {
        freeCapability(&capabilities[i]);
    }
#else
    freeCapability(&MainCapability);
#endif
}

/* ---------------------------------------------------------------------------
   Mark everything directly reachable from the Capabilities.  When
   using multiple GC threads, each GC thread t (of n threads in total)
   marks all Capabilities c for which (c `mod` n == t).
   ------------------------------------------------------------------------ */
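
/* Worked example: with n = 2 GC threads and 4 Capabilities, thread 0
 * marks Capabilities 0 and 2, while thread 1 marks Capabilities 1
 * and 3.
 */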

void
markCapability (evac_fn evac, void *user, Capability *cap,
                rtsBool no_mark_sparks USED_IF_THREADS)
{
    InCall *incall;

    // Each GC thread is responsible for following roots from the
    // Capability of the same number.  There will usually be the same
    // or fewer Capabilities as GC threads, but just in case there
    // are more, we mark every Capability whose number is the GC
    // thread's index plus a multiple of the number of GC threads.
    evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
    evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
    evac(user, (StgClosure **)(void *)&cap->inbox);
#endif
    for (incall = cap->suspended_ccalls; incall != NULL;
         incall=incall->next) {
        evac(user, (StgClosure **)(void *)&incall->suspended_tso);
    }

#if defined(THREADED_RTS)
    if (!no_mark_sparks) {
        traverseSparkQueue (evac, user, cap);
    }
#endif

    // Free STM structures for this Capability
    stmPreGCHook(cap);
}

void
markCapabilities (evac_fn evac, void *user)
{
    nat n;
    for (n = 0; n < n_capabilities; n++) {
        markCapability(evac, user, &capabilities[n], rtsFalse);
    }
}

#if defined(THREADED_RTS)
rtsBool checkSparkCountInvariant (void)
{
    SparkCounters sparks = { 0, 0, 0, 0, 0, 0 };
    StgWord64 remaining = 0;
    nat i;

    for (i = 0; i < n_capabilities; i++) {
        sparks.created    += capabilities[i].spark_stats.created;
        sparks.dud        += capabilities[i].spark_stats.dud;
        sparks.overflowed += capabilities[i].spark_stats.overflowed;
        sparks.converted  += capabilities[i].spark_stats.converted;
        sparks.gcd        += capabilities[i].spark_stats.gcd;
        sparks.fizzled    += capabilities[i].spark_stats.fizzled;
        remaining         += sparkPoolSize(capabilities[i].sparks);
    }

    /* The invariant is
     *   created = converted + remaining + gcd + fizzled
     */
    debugTrace(DEBUG_sparks,"spark invariant: %ld == %ld + %ld + %ld + %ld "
                            "(created == converted + remaining + gcd + fizzled)",
                            sparks.created, sparks.converted, remaining,
                            sparks.gcd, sparks.fizzled);

    return (sparks.created ==
              sparks.converted + remaining + sparks.gcd + sparks.fizzled);

}
#endif