rts/Capability.c (ghc.git)
Commit: Add new fully-accurate per-spark trace/eventlog events
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2003-2006
 *
 * Capabilities
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc. During
 * STG execution, a pointer to the Capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities;
 * for non-threaded builds there is only one global capability, namely
 * MainCapability.
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Capability.h"
#include "Schedule.h"
#include "Sparks.h"
#include "Trace.h"
#include "sm/GC.h" // for gcWorkerThread()
#include "STM.h"
#include "RtsUtils.h"

// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
Capability MainCapability;

nat n_capabilities = 0;
Capability *capabilities = NULL;

// Holds the Capability which last became free.  This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability = NULL;

/* GC indicator, in scope for the scheduler, init'ed to false */
volatile StgWord waiting_for_gc = 0;
/* Let foreign code get the current Capability -- assuming there is one!
 * This is useful for unsafe foreign calls because they are called with
 * the current Capability held, but they are not passed it. For example,
 * see the integer-gmp package, which calls allocateLocal() in its
 * stgAllocForGMP() function (which gets called by gmp functions).
 */
Capability * rts_unsafeGetMyCapability (void)
{
#if defined(THREADED_RTS)
    return myTask()->cap;
#else
    return &MainCapability;
#endif
}
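
/* A minimal sketch of the pattern described above: an allocation
 * callback for a foreign library, along the lines of the
 * stgAllocForGMP() example mentioned in the comment.  The function
 * name is hypothetical; rts_unsafeGetMyCapability() and
 * allocateLocal() are the real RTS entry points. */
#if 0
static void *
exampleAllocForForeignLib (size_t size_in_bytes)
{
    // Safe only during an unsafe foreign call, when the current
    // Capability is held by this OS thread but was not passed to us.
    Capability *cap = rts_unsafeGetMyCapability();

    // allocateLocal() counts in words, so round the byte count up.
    return allocateLocal(cap, ROUNDUP_BYTES_TO_WDS(size_in_bytes));
}
#endif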

#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return sched_state >= SCHED_INTERRUPTING
        || recent_activity == ACTIVITY_INACTIVE; // need to check for deadlock
}
#endif

#if defined(THREADED_RTS)
StgClosure *
findSpark (Capability *cap)
{
    Capability *robbed;
    StgClosurePtr spark;
    rtsBool retry;
    nat i = 0;

    if (!emptyRunQueue(cap) || cap->returning_tasks_hd != NULL) {
        // If there are other threads, don't try to run any new
        // sparks: sparks might be speculative, we don't want to take
        // resources away from the main computation.
        return 0;
    }

    do {
        retry = rtsFalse;

        // first try to get a spark from our own pool.
        // We should be using reclaimSpark(), because it works without
        // needing any atomic instructions:
        //   spark = reclaimSpark(cap->sparks);
        // However, measurements show that this makes at least one benchmark
        // slower (prsa) and doesn't affect the others.
        spark = tryStealSpark(cap->sparks);
        while (spark != NULL && fizzledSpark(spark)) {
            cap->spark_stats.fizzled++;
            traceEventSparkFizzle(cap);
            spark = tryStealSpark(cap->sparks);
        }
        if (spark != NULL) {
            cap->spark_stats.converted++;

            // Post event for running a spark from capability's own pool.
            traceEventSparkRun(cap);

            return spark;
        }
        if (!emptySparkPoolCap(cap)) {
            retry = rtsTrue;
        }

        if (n_capabilities == 1) { return NULL; } // makes no sense...

        debugTrace(DEBUG_sched,
                   "cap %d: Trying to steal work from other capabilities",
                   cap->no);

        /* visit Capabilities 0..n-1 in sequence until a theft succeeds.  We
           could start at a random place instead of 0 as well. */
        for ( i=0 ; i < n_capabilities ; i++ ) {
            robbed = &capabilities[i];
            if (cap == robbed)   // ourselves...
                continue;

            if (emptySparkPoolCap(robbed)) // nothing to steal here
                continue;

            spark = tryStealSpark(robbed->sparks);
            while (spark != NULL && fizzledSpark(spark)) {
                cap->spark_stats.fizzled++;
                traceEventSparkFizzle(cap);
                spark = tryStealSpark(robbed->sparks);
            }
            if (spark == NULL && !emptySparkPoolCap(robbed)) {
                // we conflicted with another thread while trying to steal;
                // try again later.
                retry = rtsTrue;
            }

            if (spark != NULL) {
                cap->spark_stats.converted++;
                traceEventSparkSteal(cap, robbed->no);

                return spark;
            }
            // otherwise: no success, try next one
        }
    } while (retry);

    debugTrace(DEBUG_sched, "No sparks stolen");
    return NULL;
}
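
/* A minimal sketch (not the scheduler's actual code) of how findSpark()
 * is consumed: an otherwise-idle Capability polls for a spark and, if it
 * gets one, evaluates it.  evaluateSpark() is hypothetical; the real
 * scheduler instead creates a dedicated spark-evaluating thread. */
#if 0
static void
exampleRunOneSpark (Capability *cap)
{
    StgClosure *spark = findSpark(cap);
    if (spark != NULL) {
        evaluateSpark(cap, spark);  // hypothetical: force the closure
    }
}
#endif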

// Returns rtsTrue if any spark pool is non-empty at this moment in time.
// The result is only valid for an instant, of course, so in a sense
// is immediately invalid, and should not be relied upon for
// correctness.
rtsBool
anySparks (void)
{
    nat i;

    for (i=0; i < n_capabilities; i++) {
        if (!emptySparkPoolCap(&capabilities[i])) {
            return rtsTrue;
        }
    }
    return rtsFalse;
}
#endif

/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock
 * -------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->next == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->next == NULL);
        cap->returning_tasks_tl->next = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}

STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    ASSERT_LOCK_HELD(&cap->lock);
    Task *task;
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->next;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->next = NULL;
    return task;
}
#endif

/* ----------------------------------------------------------------------------
 * Initialisation
 *
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */

static void
initCapability( Capability *cap, nat i )
{
    nat g;

    cap->no = i;
    cap->in_haskell = rtsFalse;

    cap->run_queue_hd = END_TSO_QUEUE;
    cap->run_queue_tl = END_TSO_QUEUE;

#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task = NULL; // indicates cap is free
    cap->spare_workers = NULL;
    cap->n_spare_workers = 0;
    cap->suspended_ccalls = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->inbox = (Message*)END_TSO_QUEUE;
    cap->sparks = allocSparkPool();
    cap->spark_stats.created = 0;
    cap->spark_stats.dud = 0;
    cap->spark_stats.overflowed = 0;
    cap->spark_stats.converted = 0;
    cap->spark_stats.gcd = 0;
    cap->spark_stats.fizzled = 0;
#endif

    cap->f.stgEagerBlackholeInfo = (W_)&__stg_EAGER_BLACKHOLE_info;
    cap->f.stgGCEnter1 = (StgFunPtr)__stg_gc_enter_1;
    cap->f.stgGCFun = (StgFunPtr)__stg_gc_fun;

    cap->mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                    RtsFlags.GcFlags.generations,
                                    "initCapability");
    cap->saved_mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                          RtsFlags.GcFlags.generations,
                                          "initCapability");

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }

    cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks = END_STM_CHUNK_LIST;
    cap->free_trec_headers = NO_TREC;
    cap->transaction_tokens = 0;
    cap->context_switch = 0;
    cap->pinned_object_block = NULL;

    traceCapsetAssignCap(CAPSET_OSPROCESS_DEFAULT, i);
#if defined(THREADED_RTS)
    traceSparkCounters(cap);
#endif
}

/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
void
initCapabilities( void )
{
    /* Declare a single capability set representing the process.
       Each capability will get added to this capset. */
    traceCapsetCreate(CAPSET_OSPROCESS_DEFAULT, CapsetTypeOsProcess);

#if defined(THREADED_RTS)
    nat i;

#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
        errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
        RtsFlags.ParFlags.nNodes = 1;
    }
#endif

    n_capabilities = RtsFlags.ParFlags.nNodes;

    if (n_capabilities == 1) {
        capabilities = &MainCapability;
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
    } else {
        capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
                                      "initCapabilities");
    }

    for (i = 0; i < n_capabilities; i++) {
        initCapability(&capabilities[i], i);
    }

    debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);

#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif

    // There are no free capabilities to begin with.  We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}
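
/* For example, a program linked with the threaded RTS and started as
 *
 *     ./prog +RTS -N4 -RTS
 *
 * reaches this point with RtsFlags.ParFlags.nNodes == 4, so a table of
 * four Capabilities is allocated and initialised here. */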

/* ----------------------------------------------------------------------------
 * setContextSwitches: cause all capabilities to context switch as
 * soon as possible.
 * ------------------------------------------------------------------------- */

void setContextSwitches(void)
{
    nat i;
    for (i=0; i < n_capabilities; i++) {
        contextSwitchCapability(&capabilities[i]);
    }
}

/* ----------------------------------------------------------------------------
 * Give a Capability to a Task.  The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    debugTrace(DEBUG_sched, "passing capability %d to %s %p",
               cap->no, task->incall->tso ? "bound task" : "worker",
               (void *)task->id);
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because signalCondition() doesn't
    // flag the condition if the thread is already running, but we want
    // it to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
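
/* The matching wait-side pattern (used verbatim in
 * waitForReturnCapability() and yieldCapability() below) shows why the
 * wakeup flag must be sticky: the signal may arrive before the waiter
 * has actually started waiting.  A minimal sketch: */
#if 0
ACQUIRE_LOCK(&task->lock);
// If the wakeup already happened, don't wait; the flag remembers it.
if (!task->wakeup) waitCondition(&task->cond, &task->lock);
cap = task->cap;            // the cap we were woken up to run on
task->wakeup = rtsFalse;    // consume the wakeup
RELEASE_LOCK(&task->lock);
#endif
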
#endif

/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability. Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap,
                    rtsBool always_wakeup)
{
    Task *task;

    task = cap->running_task;

    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call.
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }

    if (waiting_for_gc == PENDING_GC_SEQ) {
        last_free_capability = cap; // needed?
        debugTrace(DEBUG_sched, "GC pending, set capability %d free", cap->no);
        return;
    }

    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        // ASSERT(task != cap->run_queue_hd->bound);
        // assertion is false: in schedule() we force a yield after
        // ThreadBlocked, but the thread may be back on the run queue
        // by now.
        task = cap->run_queue_hd->bound->task;
        giveCapabilityToTask(cap,task);
        return;
    }

    if (!cap->spare_workers) {
        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
            debugTrace(DEBUG_sched,
                       "starting new worker on capability %d", cap->no);
            startWorkerTask(cap);
            return;
        }
    }

    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (always_wakeup ||
        !emptyRunQueue(cap) || !emptyInbox(cap) ||
        !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue;
            return;
        }
    }

    last_free_capability = cap;
    debugTrace(DEBUG_sched, "freeing capability %d", cap->no);
}

void
releaseCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsFalse);
    RELEASE_LOCK(&cap->lock);
}

void
releaseAndWakeupCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsTrue);
    RELEASE_LOCK(&cap->lock);
}

static void
releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the Task is stopped, we shouldn't be yielding, we should
    // be just exiting.
    ASSERT(!task->stopped);

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability.  A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_worker queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    if (!isBoundTask(task))
    {
        if (cap->n_spare_workers < MAX_SPARE_WORKERS)
        {
            task->next = cap->spare_workers;
            cap->spare_workers = task;
            cap->n_spare_workers++;
        }
        else
        {
            debugTrace(DEBUG_sched, "%d spare workers already, exiting",
                       cap->n_spare_workers);
            releaseCapability_(cap,rtsFalse);
            // hold the lock until after workerTaskStop; c.f. scheduleWorker()
            workerTaskStop(task);
            RELEASE_LOCK(&cap->lock);
            shutdownThread();
        }
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap,rtsFalse);

    RELEASE_LOCK(&cap->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * waitForReturnCapability( Task *task )
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
void
waitForReturnCapability (Capability **pCap, Task *task)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            cap = NULL;
            for (i = 0; i < n_capabilities; i++) {
                if (!capabilities[i].running_task) {
                    cap = &capabilities[i];
                    break;
                }
            }
            if (cap == NULL) {
                // Can't find a free one, use last_free_capability.
                cap = last_free_capability;
            }
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }

    ACQUIRE_LOCK(&cap->lock);

    debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);

    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }

    }

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    debugTrace(DEBUG_sched, "resuming capability %d", cap->no);

    *pCap = cap;
#endif
}
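
/* A minimal sketch of the in-call lifecycle this function serves, as
 * seen from a caller like Schedule.c's resumeThread(); the surrounding
 * calls are simplified and illustrative only: */
#if 0
// On returning from a foreign call, the Task must re-acquire a
// Capability before it may touch any RTS state:
Capability *cap = task->cap;          // may be NULL for a fresh in-call
waitForReturnCapability(&cap, task);
// ... resume the suspended Haskell thread under 'cap' ...
#endif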

#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */

void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

    if (waiting_for_gc == PENDING_GC_PAR) {
        traceEventGcStart(cap);
        gcWorkerThread(cap);
        traceEventGcEnd(cap);
        traceSparkCounters(cap);
        return;
    }

    debugTrace(DEBUG_sched, "giving up capability %d", cap->no);

    // We must now release the capability and wait to be woken up
    // again.
    task->wakeup = rtsFalse;
    releaseCapabilityAndQueueWorker(cap);

    for (;;) {
        ACQUIRE_LOCK(&task->lock);
        // task->lock held, cap->lock not held
        if (!task->wakeup) waitCondition(&task->cond, &task->lock);
        cap = task->cap;
        task->wakeup = rtsFalse;
        RELEASE_LOCK(&task->lock);

        debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);

        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task != NULL) {
            debugTrace(DEBUG_sched,
                       "capability %d is owned by another task", cap->no);
            RELEASE_LOCK(&cap->lock);
            continue;
        }

        if (task->incall->tso == NULL) {
            ASSERT(cap->spare_workers != NULL);
            // if we're not at the front of the queue, release it
            // again.  This is unlikely to happen.
            if (cap->spare_workers != task) {
                giveCapabilityToTask(cap,cap->spare_workers);
                RELEASE_LOCK(&cap->lock);
                continue;
            }
            cap->spare_workers = task->next;
            task->next = NULL;
            cap->n_spare_workers--;
        }
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
        break;
    }

    debugTrace(DEBUG_sched, "resuming capability %d", cap->no);
    ASSERT(cap->running_task == task);

    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    return;
}
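
/* A sketch (hypothetical predicate, simplified from the scheduler's
 * main loop) of how yieldCapability() is used: the caller must be
 * prepared to come back owning a *different* Capability. */
#if 0
if (shouldYieldCapability(cap, task)) {   // hypothetical condition
    yieldCapability(&cap, task);          // may block; may change 'cap'
}
// from here on, use the possibly-new 'cap'
#endif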

/* ----------------------------------------------------------------------------
 * prodCapability
 *
 * If a Capability is currently idle, wake up a Task on it.  Used to
 * get every Capability into the GC.
 * ------------------------------------------------------------------------- */

void
prodCapability (Capability *cap, Task *task)
{
    ACQUIRE_LOCK(&cap->lock);
    if (!cap->running_task) {
        cap->running_task = task;
        releaseCapability_(cap,rtsTrue);
    }
    RELEASE_LOCK(&cap->lock);
}

/* ----------------------------------------------------------------------------
 * tryGrabCapability
 *
 * Attempt to gain control of a Capability if it is free.
 *
 * ------------------------------------------------------------------------- */

rtsBool
tryGrabCapability (Capability *cap, Task *task)
{
    if (cap->running_task != NULL) return rtsFalse;
    ACQUIRE_LOCK(&cap->lock);
    if (cap->running_task != NULL) {
        RELEASE_LOCK(&cap->lock);
        return rtsFalse;
    }
    task->cap = cap;
    cap->running_task = task;
    RELEASE_LOCK(&cap->lock);
    return rtsTrue;
}
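
/* tryGrabCapability() does an unlocked check first and re-checks under
 * cap->lock, so it is cheap to probe many Capabilities in a row.  A
 * minimal sketch of that use (illustrative, not the scheduler's actual
 * code; the function name is hypothetical): */
#if 0
static rtsBool
exampleGrabAnyFreeCapability (Task *task, Capability **out)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        if (tryGrabCapability(&capabilities[i], task)) {
            *out = &capabilities[i];
            return rtsTrue;
        }
    }
    return rtsFalse;   // everyone is busy right now
}
#endif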

#endif /* THREADED_RTS */

/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible.  For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, thus any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller.  Runnable threads are
 * killed.
 *
 * ------------------------------------------------------------------------- */

void
shutdownCapability (Capability *cap,
                    Task *task USED_IF_THREADS,
                    rtsBool safe USED_IF_THREADS)
{
#if defined(THREADED_RTS)
    nat i;

    task->cap = cap;

    // Loop indefinitely until all the workers have exited and there
    // are no Haskell threads left.  We used to bail out after 50
    // iterations of this loop, but that occasionally left a worker
    // running which caused problems later (the closeMutex() below
    // isn't safe, for one thing).

    for (i = 0; /* i < 50 */; i++) {
        ASSERT(sched_state == SCHED_SHUTTING_DOWN);

        debugTrace(DEBUG_sched,
                   "shutting down capability %d, attempt %d", cap->no, i);
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            debugTrace(DEBUG_sched, "not owner, yielding");
            yieldThread();
            continue;
        }
        cap->running_task = task;

        if (cap->spare_workers) {
            // Look for workers that have died without removing
            // themselves from the list; this could happen if the OS
            // summarily killed the thread, for example.  This
            // actually happens on Windows when the system is
            // terminating the program, and the RTS is running in a
            // DLL.
            Task *t, *prev;
            prev = NULL;
            for (t = cap->spare_workers; t != NULL; t = t->next) {
                if (!osThreadIsAlive(t->id)) {
                    debugTrace(DEBUG_sched,
                               "worker thread %p has died unexpectedly", (void *)t->id);
                    cap->n_spare_workers--;
                    if (!prev) {
                        cap->spare_workers = t->next;
                    } else {
                        prev->next = t->next;
                    }
                } else {
                    // only advance prev past nodes that stay on the
                    // list; otherwise unlinking a later dead worker
                    // would detach live ones too.
                    prev = t;
                }
            }
        }

        if (!emptyRunQueue(cap) || cap->spare_workers) {
            debugTrace(DEBUG_sched,
                       "runnable threads or workers still alive, yielding");
            releaseCapability_(cap,rtsFalse); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        // If "safe", then busy-wait for any threads currently doing
        // foreign calls.  If we're about to unload this DLL, for
        // example, we need to be sure that there are no OS threads
        // that will try to return to code that has been unloaded.
        // We can be a bit more relaxed when this is a standalone
        // program that is about to terminate, and let safe=false.
        if (cap->suspended_ccalls && safe) {
            debugTrace(DEBUG_sched,
                       "thread(s) are involved in foreign calls, yielding");
            cap->running_task = NULL;
            RELEASE_LOCK(&cap->lock);
            // The IO manager thread might have been slow to start up,
            // so the first attempt to kill it might not have
            // succeeded.  Just in case, try again - the kill message
            // will only be sent once.
            //
            // To reproduce this deadlock: run ffi002(threaded1)
            // repeatedly on a loaded machine.
            ioManagerDie();
            yieldThread();
            continue;
        }

        traceEventShutdown(cap);
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // We now have the Capability; its run queue and spare workers
    // list are both empty.

    // ToDo: we can't drop this mutex, because there might still be
    // threads performing foreign calls that will eventually try to
    // return via resumeThread() and attempt to grab cap->lock.
    // closeMutex(&cap->lock);

    traceSparkCounters(cap);

#endif /* THREADED_RTS */

    traceCapsetRemoveCap(CAPSET_OSPROCESS_DEFAULT, cap->no);
}

void
shutdownCapabilities(Task *task, rtsBool safe)
{
    nat i;
    for (i=0; i < n_capabilities; i++) {
        ASSERT(task->incall->tso == NULL);
        shutdownCapability(&capabilities[i], task, safe);
    }
    traceCapsetDelete(CAPSET_OSPROCESS_DEFAULT);

#if defined(THREADED_RTS)
    ASSERT(checkSparkCountInvariant());
#endif
}

static void
freeCapability (Capability *cap)
{
    stgFree(cap->mut_lists);
    stgFree(cap->saved_mut_lists);
#if defined(THREADED_RTS)
    freeSparkPool(cap->sparks);
#endif
}

void
freeCapabilities (void)
{
#if defined(THREADED_RTS)
    nat i;
    for (i=0; i < n_capabilities; i++) {
        freeCapability(&capabilities[i]);
    }
#else
    freeCapability(&MainCapability);
#endif
}

/* ---------------------------------------------------------------------------
   Mark everything directly reachable from the Capabilities.  When
   using multiple GC threads, each GC thread t (of n GC threads) marks
   the Capabilities c for which (c `mod` n == t).
   ------------------------------------------------------------------------ */

void
markCapability (evac_fn evac, void *user, Capability *cap,
                rtsBool no_mark_sparks USED_IF_THREADS)
{
    InCall *incall;

    // Each GC thread is responsible for following roots from the
    // Capability of the same number.  There will usually be the same
    // or fewer Capabilities as GC threads, but just in case there
    // are more, we mark every Capability whose number is the GC
    // thread's index plus a multiple of the number of GC threads.
    evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
    evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
    evac(user, (StgClosure **)(void *)&cap->inbox);
#endif
    for (incall = cap->suspended_ccalls; incall != NULL;
         incall=incall->next) {
        evac(user, (StgClosure **)(void *)&incall->suspended_tso);
    }

#if defined(THREADED_RTS)
    if (!no_mark_sparks) {
        traverseSparkQueue (evac, user, cap);
    }
#endif

    // Free STM structures for this Capability
    stmPreGCHook(cap);
}
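
/* A minimal sketch of the partition described above, as a GC thread
 * might apply it (gc_thread_index and n_gc_threads are hypothetical
 * names for the thread's index and the GC thread count): */
#if 0
nat c;
for (c = gc_thread_index; c < n_capabilities; c += n_gc_threads) {
    // thread t marks caps t, t+n, t+2n, ...
    markCapability(evac, user, &capabilities[c], rtsFalse);
}
#endif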

void
markCapabilities (evac_fn evac, void *user)
{
    nat n;
    for (n = 0; n < n_capabilities; n++) {
        markCapability(evac, user, &capabilities[n], rtsFalse);
    }
}

#if defined(THREADED_RTS)
rtsBool checkSparkCountInvariant (void)
{
    SparkCounters sparks = { 0, 0, 0, 0, 0, 0 };
    StgWord64 remaining = 0;
    nat i;

    for (i = 0; i < n_capabilities; i++) {
        sparks.created   += capabilities[i].spark_stats.created;
        sparks.dud       += capabilities[i].spark_stats.dud;
        sparks.overflowed+= capabilities[i].spark_stats.overflowed;
        sparks.converted += capabilities[i].spark_stats.converted;
        sparks.gcd       += capabilities[i].spark_stats.gcd;
        sparks.fizzled   += capabilities[i].spark_stats.fizzled;
        remaining        += sparkPoolSize(capabilities[i].sparks);
    }

    /* The invariant is
     *   created = converted + remaining + gcd + fizzled
     */
    debugTrace(DEBUG_sparks,"spark invariant: %ld == %ld + %ld + %ld + %ld "
                            "(created == converted + remaining + gcd + fizzled)",
                            sparks.created, sparks.converted, remaining,
                            sparks.gcd, sparks.fizzled);

    return (sparks.created ==
              sparks.converted + remaining + sparks.gcd + sparks.fizzled);

}
#endif