Store a destination step in the block descriptor
[ghc.git] / rts / Schedule.c
1 /* ---------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2006
4 *
5 * The scheduler and thread-related functionality
6 *
7 * --------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #define KEEP_LOCKCLOSURE
11 #include "Rts.h"
12
13 #include "sm/Storage.h"
14 #include "RtsUtils.h"
15 #include "StgRun.h"
16 #include "Schedule.h"
17 #include "Interpreter.h"
18 #include "Printer.h"
19 #include "RtsSignals.h"
20 #include "Sanity.h"
21 #include "Stats.h"
22 #include "STM.h"
23 #include "Prelude.h"
24 #include "ThreadLabels.h"
25 #include "Updates.h"
26 #include "Proftimer.h"
27 #include "ProfHeap.h"
28 #include "Weak.h"
29 #include "sm/GC.h" // waitForGcThreads, releaseGCThreads, N
30 #include "Sparks.h"
31 #include "Capability.h"
32 #include "Task.h"
33 #include "AwaitEvent.h"
34 #if defined(mingw32_HOST_OS)
35 #include "win32/IOManager.h"
36 #endif
37 #include "Trace.h"
38 #include "RaiseAsync.h"
39 #include "Threads.h"
40 #include "Timer.h"
41 #include "ThreadPaused.h"
42
43 #ifdef HAVE_SYS_TYPES_H
44 #include <sys/types.h>
45 #endif
46 #ifdef HAVE_UNISTD_H
47 #include <unistd.h>
48 #endif
49
50 #include <string.h>
51 #include <stdlib.h>
52 #include <stdarg.h>
53
54 #ifdef HAVE_ERRNO_H
55 #include <errno.h>
56 #endif
57
58 /* -----------------------------------------------------------------------------
59 * Global variables
60 * -------------------------------------------------------------------------- */
61
62 #if !defined(THREADED_RTS)
63 // Blocked/sleeping threads
64 StgTSO *blocked_queue_hd = NULL;
65 StgTSO *blocked_queue_tl = NULL;
66 StgTSO *sleeping_queue = NULL; // perhaps replace with a hash table?
67 #endif
68
69 /* Threads blocked on blackholes.
70 * LOCK: sched_mutex+capability, or all capabilities
71 */
72 StgTSO *blackhole_queue = NULL;
73
74 /* The blackhole_queue should be checked for threads to wake up. See
75 * Schedule.h for a more thorough comment.
76 * LOCK: none (doesn't matter if we miss an update)
77 */
78 rtsBool blackholes_need_checking = rtsFalse;
79
80 /* Set to true when the latest garbage collection failed to reclaim
81 * enough space, and the runtime should proceed to shut itself down in
82 * an orderly fashion (emitting profiling info etc.)
83 */
84 rtsBool heap_overflow = rtsFalse;
85
86 /* flag that tracks whether we have done any execution in this time slice.
87 * LOCK: currently none, perhaps we should lock (but needs to be
88 * updated in the fast path of the scheduler).
89 *
90 * NB. must be StgWord, we do xchg() on it.
91 */
92 volatile StgWord recent_activity = ACTIVITY_YES;
93
94 /* if this flag is set as well, give up execution
95 * LOCK: none (changes monotonically)
96 */
97 volatile StgWord sched_state = SCHED_RUNNING;
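// The state only ever advances: SCHED_RUNNING -> SCHED_INTERRUPTING ->
// SCHED_SHUTTING_DOWN (hence "changes monotonically" above), which is why
// unlocked tests such as "sched_state >= SCHED_INTERRUPTING" elsewhere in
// this file are safe.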
98
99 /* This is used in `TSO.h' and gcc 2.96 insists that this variable actually
100 * exists - earlier gccs apparently didn't.
101 * -= chak
102 */
103 StgTSO dummy_tso;
104
105 /*
106 * Set to TRUE when entering a shutdown state (via shutdownHaskellAndExit()) --
107 * in an MT setting, needed to signal that a worker thread shouldn't hang around
108 * in the scheduler when it is out of work.
109 */
110 rtsBool shutting_down_scheduler = rtsFalse;
111
112 /*
113 * This mutex protects most of the global scheduler data in
114 * the THREADED_RTS runtime.
115 */
116 #if defined(THREADED_RTS)
117 Mutex sched_mutex;
118 #endif
119
120 #if !defined(mingw32_HOST_OS)
121 #define FORKPROCESS_PRIMOP_SUPPORTED
122 #endif
123
124 /* -----------------------------------------------------------------------------
125 * static function prototypes
126 * -------------------------------------------------------------------------- */
127
128 static Capability *schedule (Capability *initialCapability, Task *task);
129
130 //
131 // These functions all encapsulate parts of the scheduler loop, and are
132 // abstracted only to make the structure and control flow of the
133 // scheduler clearer.
134 //
135 static void schedulePreLoop (void);
136 static void scheduleFindWork (Capability *cap);
137 #if defined(THREADED_RTS)
138 static void scheduleYield (Capability **pcap, Task *task, rtsBool);
139 #endif
140 static void scheduleStartSignalHandlers (Capability *cap);
141 static void scheduleCheckBlockedThreads (Capability *cap);
142 static void scheduleCheckWakeupThreads(Capability *cap USED_IF_NOT_THREADS);
143 static void scheduleCheckBlackHoles (Capability *cap);
144 static void scheduleDetectDeadlock (Capability *cap, Task *task);
145 static void schedulePushWork(Capability *cap, Task *task);
146 #if defined(THREADED_RTS)
147 static void scheduleActivateSpark(Capability *cap);
148 #endif
149 static void schedulePostRunThread(Capability *cap, StgTSO *t);
150 static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
151 static void scheduleHandleStackOverflow( Capability *cap, Task *task,
152 StgTSO *t);
153 static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t,
154 nat prev_what_next );
155 static void scheduleHandleThreadBlocked( StgTSO *t );
156 static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
157 StgTSO *t );
158 static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc);
159 static Capability *scheduleDoGC(Capability *cap, Task *task,
160 rtsBool force_major);
161
162 static rtsBool checkBlackHoles(Capability *cap);
163
164 static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
165 static StgTSO *threadStackUnderflow(Task *task, StgTSO *tso);
166
167 static void deleteThread (Capability *cap, StgTSO *tso);
168 static void deleteAllThreads (Capability *cap);
169
170 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
171 static void deleteThread_(Capability *cap, StgTSO *tso);
172 #endif
173
174 /* -----------------------------------------------------------------------------
175 * Putting a thread on the run queue: different scheduling policies
176 * -------------------------------------------------------------------------- */
177
178 STATIC_INLINE void
179 addToRunQueue( Capability *cap, StgTSO *t )
180 {
181 // this does round-robin scheduling; good for concurrency
182 appendToRunQueue(cap,t);
183 }
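// NB. appendToRunQueue() adds the thread at the *tail* of the run queue,
// giving the round-robin (FIFO) behaviour noted above; pushOnRunQueue(),
// used elsewhere in this file (e.g. after a heap- or stack-overflow
// return), puts a thread at the *head* so that it runs again next.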
184
185 /* ---------------------------------------------------------------------------
186 Main scheduling loop.
187
188 We use round-robin scheduling, each thread returning to the
189 scheduler loop when one of these conditions is detected:
190
191 * out of heap space
192 * timer expires (thread yields)
193 * thread blocks
194 * thread ends
195 * stack overflow
196
197 GRAN version:
198 In a GranSim setup this loop iterates over the global event queue.
199 This revolves around the global event queue, which determines what
200 to do next. Therefore, it's more complicated than either the
201 concurrent or the parallel (GUM) setup.
202 This version has been entirely removed (JB 2008/08).
203
204 GUM version:
205 GUM iterates over incoming messages.
206 It starts with nothing to do (thus CurrentTSO == END_TSO_QUEUE),
207 and sends out a fish whenever it has nothing to do; in-between
208 doing the actual reductions (shared code below) it processes the
209 incoming messages and deals with delayed operations
210 (see PendingFetches).
211 This is not the ugliest code you could imagine, but it's bloody close.
212
213 (JB 2008/08) This version was formerly indicated by a PP-Flag PAR,
214 now by PP-flag PARALLEL_HASKELL. The Eden RTS (in GHC-6.x) uses it,
215 as well as future GUM versions. This file has been refurbished to
216 only contain valid code, which is however incomplete, refers to
217 invalid includes etc.
218
219 ------------------------------------------------------------------------ */
220
221 static Capability *
222 schedule (Capability *initialCapability, Task *task)
223 {
224 StgTSO *t;
225 Capability *cap;
226 StgThreadReturnCode ret;
227 nat prev_what_next;
228 rtsBool ready_to_gc;
229 #if defined(THREADED_RTS)
230 rtsBool first = rtsTrue;
231 rtsBool force_yield = rtsFalse;
232 #endif
233
234 cap = initialCapability;
235
236 // Pre-condition: this task owns initialCapability.
237 // The sched_mutex is *NOT* held
238 // NB. on return, we still hold a capability.
239
240 debugTrace (DEBUG_sched, "cap %d: schedule()", initialCapability->no);
241
242 schedulePreLoop();
243
244 // -----------------------------------------------------------
245 // Scheduler loop starts here:
246
247 while (1) {
248
249 // Check whether we have re-entered the RTS from Haskell without
250 // going via suspendThread()/resumeThread (i.e. a 'safe' foreign
251 // call).
252 if (cap->in_haskell) {
253 errorBelch("schedule: re-entered unsafely.\n"
254 " Perhaps a 'foreign import unsafe' should be 'safe'?");
255 stg_exit(EXIT_FAILURE);
256 }
257
258 // The interruption / shutdown sequence.
259 //
260 // In order to cleanly shut down the runtime, we want to:
261 // * make sure that all main threads return to their callers
262 // with the state 'Interrupted'.
263 // * clean up all OS threads associated with the runtime
264 // * free all memory etc.
265 //
266 // So the sequence for ^C goes like this:
267 //
268 // * ^C handler sets sched_state := SCHED_INTERRUPTING and
269 // arranges for some Capability to wake up
270 //
271 // * all threads in the system are halted, and the zombies are
272 // placed on the run queue for cleaning up. We acquire all
273 // the capabilities in order to delete the threads, this is
274 // done by scheduleDoGC() for convenience (because GC already
275 // needs to acquire all the capabilities). We can't kill
276 // threads involved in foreign calls.
277 //
278 // * somebody calls shutdownHaskell(), which calls exitScheduler()
279 //
280 // * sched_state := SCHED_SHUTTING_DOWN
281 //
282 // * all workers exit when the run queue on their capability
283 // drains. All main threads will also exit when their TSO
284 // reaches the head of the run queue and they can return.
285 //
286 // * eventually all Capabilities will shut down, and the RTS can
287 // exit.
288 //
289 // * We might be left with threads blocked in foreign calls,
290 // we should really attempt to kill these somehow (TODO);
291
292 switch (sched_state) {
293 case SCHED_RUNNING:
294 break;
295 case SCHED_INTERRUPTING:
296 debugTrace(DEBUG_sched, "SCHED_INTERRUPTING");
297 #if defined(THREADED_RTS)
298 discardSparksCap(cap);
299 #endif
300 /* scheduleDoGC() deletes all the threads */
301 cap = scheduleDoGC(cap,task,rtsFalse);
302
303 // after scheduleDoGC(), we must be shutting down. Either some
304 // other Capability did the final GC, or we did it above,
305 // either way we can fall through to the SCHED_SHUTTING_DOWN
306 // case now.
307 ASSERT(sched_state == SCHED_SHUTTING_DOWN);
308 // fall through
309
310 case SCHED_SHUTTING_DOWN:
311 debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN");
312 // If we are a worker, just exit. If we're a bound thread
313 // then we will exit below when we've removed our TSO from
314 // the run queue.
315 if (task->tso == NULL && emptyRunQueue(cap)) {
316 return cap;
317 }
318 break;
319 default:
320 barf("sched_state: %d", sched_state);
321 }
322
323 scheduleFindWork(cap);
324
325 /* work pushing, currently relevant only for THREADED_RTS:
326 (pushes threads, wakes up idle capabilities for stealing) */
327 schedulePushWork(cap,task);
328
329 scheduleDetectDeadlock(cap,task);
330
331 #if defined(THREADED_RTS)
332 cap = task->cap; // reload cap, it might have changed
333 #endif
334
335 // Normally, the only way we can get here with no threads to
336 // run is if a keyboard interrupt was received during
337 // scheduleCheckBlockedThreads() or scheduleDetectDeadlock().
338 // Additionally, it is not fatal for the
339 // threaded RTS to reach here with no threads to run.
340 //
341 // win32: might be here due to awaitEvent() being abandoned
342 // as a result of a console event having been delivered.
343
344 #if defined(THREADED_RTS)
345 if (first)
346 {
347 // XXX: ToDo
348 // // don't yield the first time, we want a chance to run this
349 // // thread for a bit, even if there are others banging at the
350 // // door.
351 // first = rtsFalse;
352 // ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
353 }
354
355 yield:
356 scheduleYield(&cap,task,force_yield);
357 force_yield = rtsFalse;
358
359 if (emptyRunQueue(cap)) continue; // look for work again
360 #endif
361
362 #if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
363 if ( emptyRunQueue(cap) ) {
364 ASSERT(sched_state >= SCHED_INTERRUPTING);
365 }
366 #endif
367
368 //
369 // Get a thread to run
370 //
371 t = popRunQueue(cap);
372
373 // Sanity check the thread we're about to run. This can be
374 // expensive if there is lots of thread switching going on...
375 IF_DEBUG(sanity,checkTSO(t));
376
377 #if defined(THREADED_RTS)
378 // Check whether we can run this thread in the current task.
379 // If not, we have to pass our capability to the right task.
380 {
381 Task *bound = t->bound;
382
383 if (bound) {
384 if (bound == task) {
385 // yes, the Haskell thread is bound to the current native thread
386 } else {
387 debugTrace(DEBUG_sched,
388 "thread %lu bound to another OS thread",
389 (unsigned long)t->id);
390 // no, bound to a different Haskell thread: pass to that thread
391 pushOnRunQueue(cap,t);
392 continue;
393 }
394 } else {
395 // The thread we want to run is unbound.
396 if (task->tso) {
397 debugTrace(DEBUG_sched,
398 "this OS thread cannot run thread %lu",
399 (unsigned long)t->id);
400 // no, the current native thread is bound to a different
401 // Haskell thread, so pass it to any worker thread
402 pushOnRunQueue(cap,t);
403 continue;
404 }
405 }
406 }
407 #endif
408
409 // If we're shutting down, and this thread has not yet been
410 // killed, kill it now. This sometimes happens when a finalizer
411 // thread is created by the final GC, or a thread previously
412 // in a foreign call returns.
413 if (sched_state >= SCHED_INTERRUPTING &&
414 !(t->what_next == ThreadComplete || t->what_next == ThreadKilled)) {
415 deleteThread(cap,t);
416 }
417
418 /* context switches are initiated by the timer signal, unless
419 * the user specified "context switch as often as possible", with
420 * +RTS -C0
421 */
422 if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
423 && !emptyThreadQueues(cap)) {
424 cap->context_switch = 1;
425 }
426
427 run_thread:
428
429 // CurrentTSO is the thread to run. t might be different if we
430 // loop back to run_thread, so make sure to set CurrentTSO after
431 // that.
432 cap->r.rCurrentTSO = t;
433
434 startHeapProfTimer();
435
436 // Check for exceptions blocked on this thread
437 maybePerformBlockedException (cap, t);
438
439 // ----------------------------------------------------------------------
440 // Run the current thread
441
442 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
443 ASSERT(t->cap == cap);
444 ASSERT(t->bound ? t->bound->cap == cap : 1);
445
446 prev_what_next = t->what_next;
447
448 errno = t->saved_errno;
449 #if mingw32_HOST_OS
450 SetLastError(t->saved_winerror);
451 #endif
452
453 cap->in_haskell = rtsTrue;
454
455 dirty_TSO(cap,t);
456
457 #if defined(THREADED_RTS)
458 if (recent_activity == ACTIVITY_DONE_GC) {
459 // ACTIVITY_DONE_GC means we turned off the timer signal to
460 // conserve power (see #1623). Re-enable it here.
461 nat prev;
462 prev = xchg((P_)&recent_activity, ACTIVITY_YES);
463 if (prev == ACTIVITY_DONE_GC) {
464 startTimer();
465 }
466 } else {
467 recent_activity = ACTIVITY_YES;
468 }
469 #endif
470
471 traceSchedEvent(cap, EVENT_RUN_THREAD, t, 0);
472
473 switch (prev_what_next) {
474
475 case ThreadKilled:
476 case ThreadComplete:
477 /* Thread already finished, return to scheduler. */
478 ret = ThreadFinished;
479 break;
480
481 case ThreadRunGHC:
482 {
483 StgRegTable *r;
484 r = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
485 cap = regTableToCapability(r);
486 ret = r->rRet;
487 break;
488 }
489
490 case ThreadInterpret:
491 cap = interpretBCO(cap);
492 ret = cap->r.rRet;
493 break;
494
495 default:
496 barf("schedule: invalid what_next field");
497 }
498
499 cap->in_haskell = rtsFalse;
500
501 // The TSO might have moved, eg. if it re-entered the RTS and a GC
502 // happened. So find the new location:
503 t = cap->r.rCurrentTSO;
504
505 // We have run some Haskell code: there might be blackhole-blocked
506 // threads to wake up now.
507 // Lock-free test here should be ok, we're just setting a flag.
508 if ( blackhole_queue != END_TSO_QUEUE ) {
509 blackholes_need_checking = rtsTrue;
510 }
511
512 // And save the current errno in this thread.
513 // XXX: possibly bogus for SMP because this thread might already
514 // be running again, see code below.
515 t->saved_errno = errno;
516 #if mingw32_HOST_OS
517 // Similarly for Windows error code
518 t->saved_winerror = GetLastError();
519 #endif
520
521 traceSchedEvent (cap, EVENT_STOP_THREAD, t, ret);
522
523 #if defined(THREADED_RTS)
524 // If ret is ThreadBlocked, and this Task is bound to the TSO that
525 // blocked, we are in limbo - the TSO is now owned by whatever it
526 // is blocked on, and may in fact already have been woken up,
527 // perhaps even on a different Capability. It may be the case
528 // that task->cap != cap. We better yield this Capability
529 // immediately and return to normality.
530 if (ret == ThreadBlocked) {
531 force_yield = rtsTrue;
532 goto yield;
533 }
534 #endif
535
536 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
537 ASSERT(t->cap == cap);
538
539 // ----------------------------------------------------------------------
540
541 // Costs for the scheduler are assigned to CCS_SYSTEM
542 stopHeapProfTimer();
543 #if defined(PROFILING)
544 CCCS = CCS_SYSTEM;
545 #endif
546
547 schedulePostRunThread(cap,t);
548
549 if (ret != StackOverflow) {
550 t = threadStackUnderflow(task,t);
551 }
552
553 ready_to_gc = rtsFalse;
554
555 switch (ret) {
556 case HeapOverflow:
557 ready_to_gc = scheduleHandleHeapOverflow(cap,t);
558 break;
559
560 case StackOverflow:
561 scheduleHandleStackOverflow(cap,task,t);
562 break;
563
564 case ThreadYielding:
565 if (scheduleHandleYield(cap, t, prev_what_next)) {
566 // shortcut for switching between compiler/interpreter:
567 goto run_thread;
568 }
569 break;
570
571 case ThreadBlocked:
572 scheduleHandleThreadBlocked(t);
573 break;
574
575 case ThreadFinished:
576 if (scheduleHandleThreadFinished(cap, task, t)) return cap;
577 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
578 break;
579
580 default:
581 barf("schedule: invalid thread return code %d", (int)ret);
582 }
583
584 if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
585 cap = scheduleDoGC(cap,task,rtsFalse);
586 }
587 } /* end of while() */
588 }
589
590 /* ----------------------------------------------------------------------------
591 * Setting up the scheduler loop
592 * ------------------------------------------------------------------------- */
593
594 static void
595 schedulePreLoop(void)
596 {
597 // initialisation for scheduler - what cannot go into initScheduler()
598 }
599
600 /* -----------------------------------------------------------------------------
601 * scheduleFindWork()
602 *
603 * Search for work to do, and handle messages from elsewhere.
604 * -------------------------------------------------------------------------- */
605
606 static void
607 scheduleFindWork (Capability *cap)
608 {
609 scheduleStartSignalHandlers(cap);
610
611 // Only check the black holes here if we've nothing else to do.
612 // During normal execution, the black hole list only gets checked
613 // at GC time, to avoid repeatedly traversing this possibly long
614 // list each time around the scheduler.
615 if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
616
617 scheduleCheckWakeupThreads(cap);
618
619 scheduleCheckBlockedThreads(cap);
620
621 #if defined(THREADED_RTS)
622 if (emptyRunQueue(cap)) { scheduleActivateSpark(cap); }
623 #endif
624 }
625
626 #if defined(THREADED_RTS)
627 STATIC_INLINE rtsBool
628 shouldYieldCapability (Capability *cap, Task *task)
629 {
630 // we need to yield this capability to someone else if..
631 // - another thread is initiating a GC
632 // - another Task is returning from a foreign call
633 // - the thread at the head of the run queue cannot be run
634 // by this Task (it is bound to another Task, or it is unbound
635 // and this task is bound).
636 return (waiting_for_gc ||
637 cap->returning_tasks_hd != NULL ||
638 (!emptyRunQueue(cap) && (task->tso == NULL
639 ? cap->run_queue_hd->bound != NULL
640 : cap->run_queue_hd->bound != task)));
641 }
642
643 // This is the single place where a Task goes to sleep. There are
644 // two reasons it might need to sleep:
645 // - there are no threads to run
646 // - we need to yield this Capability to someone else
647 // (see shouldYieldCapability())
648 //
649 // Careful: the scheduler loop is quite delicate. Make sure you run
650 // the tests in testsuite/concurrent (all ways) after modifying this,
651 // and also check the benchmarks in nofib/parallel for regressions.
652
653 static void
654 scheduleYield (Capability **pcap, Task *task, rtsBool force_yield)
655 {
656 Capability *cap = *pcap;
657
658 // if we have work, and we don't need to give up the Capability, continue.
659 //
660 // The force_yield flag is used when a bound thread blocks. This
661 // is a particularly tricky situation: the current Task does not
662 // own the TSO any more, since it is on some queue somewhere, and
663 // might be woken up or manipulated by another thread at any time.
664 // The TSO and Task might be migrated to another Capability.
665 // Certain invariants might be in doubt, such as task->bound->cap
666 // == cap. We have to yield the current Capability immediately,
667 // no messing around.
668 //
669 if (!force_yield &&
670 !shouldYieldCapability(cap,task) &&
671 (!emptyRunQueue(cap) ||
672 !emptyWakeupQueue(cap) ||
673 blackholes_need_checking ||
674 sched_state >= SCHED_INTERRUPTING))
675 return;
676
677 // otherwise yield (sleep), and keep yielding if necessary.
678 do {
679 yieldCapability(&cap,task);
680 }
681 while (shouldYieldCapability(cap,task));
682
683 // note there may still be no threads on the run queue at this
684 // point, the caller has to check.
685
686 *pcap = cap;
687 return;
688 }
689 #endif
690
691 /* -----------------------------------------------------------------------------
692 * schedulePushWork()
693 *
694 * Push work to other Capabilities if we have some.
695 * -------------------------------------------------------------------------- */
696
697 static void
698 schedulePushWork(Capability *cap USED_IF_THREADS,
699 Task *task USED_IF_THREADS)
700 {
701 /* following code not for PARALLEL_HASKELL. I kept the call general;
702 future GUM versions might use pushing in a distributed setup */
703 #if defined(THREADED_RTS)
704
705 Capability *free_caps[n_capabilities], *cap0;
706 nat i, n_free_caps;
707
708 // migration can be turned off with +RTS -qg
709 if (!RtsFlags.ParFlags.migrate) return;
710
711 // Check whether we have more threads on our run queue, or sparks
712 // in our pool, that we could hand to another Capability.
713 if (cap->run_queue_hd == END_TSO_QUEUE) {
714 if (sparkPoolSizeCap(cap) < 2) return;
715 } else {
716 if (cap->run_queue_hd->_link == END_TSO_QUEUE &&
717 sparkPoolSizeCap(cap) < 1) return;
718 }
719
720 // First grab as many free Capabilities as we can.
721 for (i=0, n_free_caps=0; i < n_capabilities; i++) {
722 cap0 = &capabilities[i];
723 if (cap != cap0 && tryGrabCapability(cap0,task)) {
724 if (!emptyRunQueue(cap0) || cap->returning_tasks_hd != NULL) {
725 // it already has some work, we just grabbed it at
726 // the wrong moment. Or maybe it's deadlocked!
727 releaseCapability(cap0);
728 } else {
729 free_caps[n_free_caps++] = cap0;
730 }
731 }
732 }
733
734 // we now have n_free_caps free capabilities stashed in
735 // free_caps[]. Share our run queue equally with them. This is
736 // probably the simplest thing we could do; improvements we might
737 // want to do include:
738 //
739 // - giving high priority to moving relatively new threads, on
740 // the grounds that they haven't had time to build up a
741 // working set in the cache on this CPU/Capability.
742 //
743 // - giving low priority to moving long-lived threads
744
745 if (n_free_caps > 0) {
746 StgTSO *prev, *t, *next;
747 rtsBool pushed_to_all;
748
749 debugTrace(DEBUG_sched,
750 "cap %d: %s and %d free capabilities, sharing...",
751 cap->no,
752 (!emptyRunQueue(cap) && cap->run_queue_hd->_link != END_TSO_QUEUE)?
753 "excess threads on run queue":"sparks to share (>=2)",
754 n_free_caps);
755
756 i = 0;
757 pushed_to_all = rtsFalse;
758
759 if (cap->run_queue_hd != END_TSO_QUEUE) {
760 prev = cap->run_queue_hd;
761 t = prev->_link;
762 prev->_link = END_TSO_QUEUE;
763 for (; t != END_TSO_QUEUE; t = next) {
764 next = t->_link;
765 t->_link = END_TSO_QUEUE;
766 if (t->what_next == ThreadRelocated
767 || t->bound == task // don't move my bound thread
768 || tsoLocked(t)) { // don't move a locked thread
769 setTSOLink(cap, prev, t);
770 prev = t;
771 } else if (i == n_free_caps) {
772 pushed_to_all = rtsTrue;
773 i = 0;
774 // keep one for us
775 setTSOLink(cap, prev, t);
776 prev = t;
777 } else {
778 debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
779 appendToRunQueue(free_caps[i],t);
780
781 traceSchedEvent (cap, EVENT_MIGRATE_THREAD, t, free_caps[i]->no);
782
783 if (t->bound) { t->bound->cap = free_caps[i]; }
784 t->cap = free_caps[i];
785 i++;
786 }
787 }
788 cap->run_queue_tl = prev;
789 }
790
791 #ifdef SPARK_PUSHING
792 /* JB I left this code in place, it would work but is not necessary */
793
794 // If there are some free capabilities that we didn't push any
795 // threads to, then try to push a spark to each one.
796 if (!pushed_to_all) {
797 StgClosure *spark;
798 // i is the next free capability to push to
799 for (; i < n_free_caps; i++) {
800 if (emptySparkPoolCap(free_caps[i])) {
801 spark = tryStealSpark(cap->sparks);
802 if (spark != NULL) {
803 debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
804
805 traceSchedEvent(free_caps[i], EVENT_STEAL_SPARK, t, cap->no);
806
807 newSpark(&(free_caps[i]->r), spark);
808 }
809 }
810 }
811 }
812 #endif /* SPARK_PUSHING */
813
814 // release the capabilities
815 for (i = 0; i < n_free_caps; i++) {
816 task->cap = free_caps[i];
817 releaseAndWakeupCapability(free_caps[i]);
818 }
819 }
820 task->cap = cap; // reset to point to our Capability.
821
822 #endif /* THREADED_RTS */
823
824 }
825
826 /* ----------------------------------------------------------------------------
827 * Start any pending signal handlers
828 * ------------------------------------------------------------------------- */
829
830 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
831 static void
832 scheduleStartSignalHandlers(Capability *cap)
833 {
834 if (RtsFlags.MiscFlags.install_signal_handlers && signals_pending()) {
835 // safe outside the lock
836 startSignalHandlers(cap);
837 }
838 }
839 #else
840 static void
841 scheduleStartSignalHandlers(Capability *cap STG_UNUSED)
842 {
843 }
844 #endif
845
846 /* ----------------------------------------------------------------------------
847 * Check for blocked threads that can be woken up.
848 * ------------------------------------------------------------------------- */
849
850 static void
851 scheduleCheckBlockedThreads(Capability *cap USED_IF_NOT_THREADS)
852 {
853 #if !defined(THREADED_RTS)
854 //
855 // Check whether any waiting threads need to be woken up. If the
856 // run queue is empty, and there are no other tasks running, we
857 // can wait indefinitely for something to happen.
858 //
859 if ( !emptyQueue(blocked_queue_hd) || !emptyQueue(sleeping_queue) )
860 {
861 awaitEvent( emptyRunQueue(cap) && !blackholes_need_checking );
862 }
863 #endif
864 }
865
866
867 /* ----------------------------------------------------------------------------
868 * Check for threads woken up by other Capabilities
869 * ------------------------------------------------------------------------- */
870
871 static void
872 scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS)
873 {
874 #if defined(THREADED_RTS)
875 // Any threads that were woken up by other Capabilities get
876 // appended to our run queue.
877 if (!emptyWakeupQueue(cap)) {
878 ACQUIRE_LOCK(&cap->lock);
879 if (emptyRunQueue(cap)) {
880 cap->run_queue_hd = cap->wakeup_queue_hd;
881 cap->run_queue_tl = cap->wakeup_queue_tl;
882 } else {
883 setTSOLink(cap, cap->run_queue_tl, cap->wakeup_queue_hd);
884 cap->run_queue_tl = cap->wakeup_queue_tl;
885 }
886 cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE;
887 RELEASE_LOCK(&cap->lock);
888 }
889 #endif
890 }
891
892 /* ----------------------------------------------------------------------------
893 * Check for threads blocked on BLACKHOLEs that can be woken up
894 * ------------------------------------------------------------------------- */
895 static void
896 scheduleCheckBlackHoles (Capability *cap)
897 {
898 if ( blackholes_need_checking ) // check without the lock first
899 {
900 ACQUIRE_LOCK(&sched_mutex);
901 if ( blackholes_need_checking ) {
902 blackholes_need_checking = rtsFalse;
903 // important that we reset the flag *before* checking the
904 // blackhole queue, otherwise we could get deadlock. This
905 // happens as follows: we wake up a thread, it immediately runs
906 // on another Capability and blocks on a blackhole (setting the
907 // flag again), which we would then erroneously reset and lose.
908 checkBlackHoles(cap);
909 }
910 RELEASE_LOCK(&sched_mutex);
911 }
912 }
913
914 /* ----------------------------------------------------------------------------
915 * Detect deadlock conditions and attempt to resolve them.
916 * ------------------------------------------------------------------------- */
917
918 static void
919 scheduleDetectDeadlock (Capability *cap, Task *task)
920 {
921 /*
922 * Detect deadlock: when we have no threads to run, there are no
923 * threads blocked, waiting for I/O, or sleeping, and all the
924 * other tasks are waiting for work, we must have a deadlock of
925 * some description.
926 */
927 if ( emptyThreadQueues(cap) )
928 {
929 #if defined(THREADED_RTS)
930 /*
931 * In the threaded RTS, we only check for deadlock if there
932 * has been no activity in a complete timeslice. This means
933 * we won't eagerly start a full GC just because we don't have
934 * any threads to run currently.
935 */
936 if (recent_activity != ACTIVITY_INACTIVE) return;
937 #endif
938
939 debugTrace(DEBUG_sched, "deadlocked, forcing major GC...");
940
941 // Garbage collection can release some new threads due to
942 // either (a) finalizers or (b) threads resurrected because
943 // they are unreachable and will therefore be sent an
944 // exception. Any threads thus released will be immediately
945 // runnable.
946 cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/);
947 // when force_major == rtsTrue. scheduleDoGC sets
948 // recent_activity to ACTIVITY_DONE_GC and turns off the timer
949 // signal.
950
951 if ( !emptyRunQueue(cap) ) return;
952
953 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
954 /* If we have user-installed signal handlers, then wait
955 * for signals to arrive rather than bombing out with a
956 * deadlock.
957 */
958 if ( RtsFlags.MiscFlags.install_signal_handlers && anyUserHandlers() ) {
959 debugTrace(DEBUG_sched,
960 "still deadlocked, waiting for signals...");
961
962 awaitUserSignals();
963
964 if (signals_pending()) {
965 startSignalHandlers(cap);
966 }
967
968 // either we have threads to run, or we were interrupted:
969 ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING);
970
971 return;
972 }
973 #endif
974
975 #if !defined(THREADED_RTS)
976 /* Probably a real deadlock. Send the current main thread the
977 * Deadlock exception.
978 */
979 if (task->tso) {
980 switch (task->tso->why_blocked) {
981 case BlockedOnSTM:
982 case BlockedOnBlackHole:
983 case BlockedOnException:
984 case BlockedOnMVar:
985 throwToSingleThreaded(cap, task->tso,
986 (StgClosure *)nonTermination_closure);
987 return;
988 default:
989 barf("deadlock: main thread blocked in a strange way");
990 }
991 }
992 return;
993 #endif
994 }
995 }
996
997
998 /* ----------------------------------------------------------------------------
999 * Send pending messages (PARALLEL_HASKELL only)
1000 * ------------------------------------------------------------------------- */
1001
1002 #if defined(PARALLEL_HASKELL)
1003 static void
1004 scheduleSendPendingMessages(void)
1005 {
1006
1007 # if defined(PAR) // global Mem.Mgmt., omit for now
1008 if (PendingFetches != END_BF_QUEUE) {
1009 processFetches();
1010 }
1011 # endif
1012
1013 if (RtsFlags.ParFlags.BufferTime) {
1014 // if we use message buffering, we must send away all message
1015 // packets which have become too old...
1016 sendOldBuffers();
1017 }
1018 }
1019 #endif
1020
1021 /* ----------------------------------------------------------------------------
1022 * Activate spark threads (PARALLEL_HASKELL and THREADED_RTS)
1023 * ------------------------------------------------------------------------- */
1024
1025 #if defined(THREADED_RTS)
1026 static void
1027 scheduleActivateSpark(Capability *cap)
1028 {
1029 if (anySparks())
1030 {
1031 createSparkThread(cap);
1032 debugTrace(DEBUG_sched, "creating a spark thread");
1033 }
1034 }
1035 #endif // PARALLEL_HASKELL || THREADED_RTS
1036
1037 /* ----------------------------------------------------------------------------
1038 * After running a thread...
1039 * ------------------------------------------------------------------------- */
1040
1041 static void
1042 schedulePostRunThread (Capability *cap, StgTSO *t)
1043 {
1044 // We have to be able to catch transactions that are in an
1045 // infinite loop as a result of seeing an inconsistent view of
1046 // memory, e.g.
1047 //
1048 // atomically $ do
1049 // [a,b] <- mapM readTVar [ta,tb]
1050 // when (a == b) loop
1051 //
1052 // and a is never equal to b given a consistent view of memory.
1053 //
1054 if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
1055 if (!stmValidateNestOfTransactions (t -> trec)) {
1056 debugTrace(DEBUG_sched | DEBUG_stm,
1057 "trec %p found wasting its time", t);
1058
1059 // strip the stack back to the
1060 // ATOMICALLY_FRAME, aborting the (nested)
1061 // transaction, and saving the stack of any
1062 // partially-evaluated thunks on the heap.
1063 throwToSingleThreaded_(cap, t, NULL, rtsTrue);
1064
1065 // ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
1066 }
1067 }
1068
1069 /* some statistics gathering in the parallel case */
1070 }
1071
1072 /* -----------------------------------------------------------------------------
1073 * Handle a thread that returned to the scheduler with ThreadHeapOverflow
1074 * -------------------------------------------------------------------------- */
1075
1076 static rtsBool
1077 scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
1078 {
1079 // did the task ask for a large block?
1080 if (cap->r.rHpAlloc > BLOCK_SIZE) {
1081 // if so, get one and push it on the front of the nursery.
1082 bdescr *bd;
1083 lnat blocks;
1084
1085 blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
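// A worked example (assuming the usual 4k block size): a 10000-byte
// rHpAlloc rounds up to 12288 bytes, so blocks == 3 here.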
1086
1087 debugTrace(DEBUG_sched,
1088 "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
1089 (long)t->id, what_next_strs[t->what_next], blocks);
1090
1091 // don't do this if the nursery is (nearly) full, we'll GC first.
1092 if (cap->r.rCurrentNursery->link != NULL ||
1093 cap->r.rNursery->n_blocks == 1) { // paranoia to prevent infinite loop
1094 // if the nursery has only one block.
1095
1096 ACQUIRE_SM_LOCK
1097 bd = allocGroup( blocks );
1098 RELEASE_SM_LOCK
1099 cap->r.rNursery->n_blocks += blocks;
1100
1101 // link the new group into the list
1102 bd->link = cap->r.rCurrentNursery;
1103 bd->u.back = cap->r.rCurrentNursery->u.back;
1104 if (cap->r.rCurrentNursery->u.back != NULL) {
1105 cap->r.rCurrentNursery->u.back->link = bd;
1106 } else {
1107 cap->r.rNursery->blocks = bd;
1108 }
1109 cap->r.rCurrentNursery->u.back = bd;
1110
1111 // initialise it as a nursery block. We initialise the
1112 // step, gen_no, and flags field of *every* sub-block in
1113 // this large block, because this is easier than making
1114 // sure that we always find the block head of a large
1115 // block whenever we call Bdescr() (eg. evacuate() and
1116 // isAlive() in the GC would both have to do this, at
1117 // least).
1118 {
1119 bdescr *x;
1120 for (x = bd; x < bd + blocks; x++) {
1121 initBdescr(x,cap->r.rNursery);
1122 x->free = x->start;
1123 x->flags = 0;
1124 }
1125 }
1126
1127 // This assert can be a killer if the app is doing lots
1128 // of large block allocations.
1129 IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
1130
1131 // now update the nursery to point to the new block
1132 cap->r.rCurrentNursery = bd;
1133
1134 // we might be unlucky and have another thread get on the
1135 // run queue before us and steal the large block, but in that
1136 // case the thread will just end up requesting another large
1137 // block.
1138 pushOnRunQueue(cap,t);
1139 return rtsFalse; /* not actually GC'ing */
1140 }
1141 }
1142
1143 if (cap->r.rHpLim == NULL || cap->context_switch) {
1144 // Sometimes we miss a context switch, e.g. when calling
1145 // primitives in a tight loop, MAYBE_GC() doesn't check the
1146 // context switch flag, and we end up waiting for a GC.
1147 // See #1984, and concurrent/should_run/1984
1148 cap->context_switch = 0;
1149 addToRunQueue(cap,t);
1150 } else {
1151 pushOnRunQueue(cap,t);
1152 }
1153 return rtsTrue;
1154 /* actual GC is done at the end of the while loop in schedule() */
1155 }
1156
1157 /* -----------------------------------------------------------------------------
1158 * Handle a thread that returned to the scheduler with ThreadStackOverflow
1159 * -------------------------------------------------------------------------- */
1160
1161 static void
1162 scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
1163 {
1164 /* just adjust the stack for this thread, then pop it back
1165 * on the run queue.
1166 */
1167 {
1168 /* enlarge the stack */
1169 StgTSO *new_t = threadStackOverflow(cap, t);
1170
1171 /* The TSO attached to this Task may have moved, so update the
1172 * pointer to it.
1173 */
1174 if (task->tso == t) {
1175 task->tso = new_t;
1176 }
1177 pushOnRunQueue(cap,new_t);
1178 }
1179 }
1180
1181 /* -----------------------------------------------------------------------------
1182 * Handle a thread that returned to the scheduler with ThreadYielding
1183 * -------------------------------------------------------------------------- */
1184
1185 static rtsBool
1186 scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
1187 {
1188 // Reset the context switch flag. We don't do this just before
1189 // running the thread, because that would mean we would lose ticks
1190 // during GC, which can lead to unfair scheduling (a thread hogs
1191 // the CPU because the tick always arrives during GC). This way
1192 // penalises threads that do a lot of allocation, but that seems
1193 // better than the alternative.
1194 cap->context_switch = 0;
1195
1196 /* put the thread back on the run queue. Then, if we're ready to
1197 * GC, check whether this is the last task to stop. If so, wake
1198 * up the GC thread. getThread will block during a GC until the
1199 * GC is finished.
1200 */
1201 #ifdef DEBUG
1202 if (t->what_next != prev_what_next) {
1203 debugTrace(DEBUG_sched,
1204 "--<< thread %ld (%s) stopped to switch evaluators",
1205 (long)t->id, what_next_strs[t->what_next]);
1206 }
1207 #endif
1208
1209 IF_DEBUG(sanity,
1210 //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
1211 checkTSO(t));
1212 ASSERT(t->_link == END_TSO_QUEUE);
1213
1214 // Shortcut if we're just switching evaluators: don't bother
1215 // doing stack squeezing (which can be expensive), just run the
1216 // thread.
1217 if (t->what_next != prev_what_next) {
1218 return rtsTrue;
1219 }
1220
1221 addToRunQueue(cap,t);
1222
1223 return rtsFalse;
1224 }
1225
1226 /* -----------------------------------------------------------------------------
1227 * Handle a thread that returned to the scheduler with ThreadBlocked
1228 * -------------------------------------------------------------------------- */
1229
1230 static void
1231 scheduleHandleThreadBlocked( StgTSO *t
1232 #if !defined(DEBUG)
1233 STG_UNUSED
1234 #endif
1235 )
1236 {
1237
1238 // We don't need to do anything. The thread is blocked, and it
1239 // has tidied up its stack and placed itself on whatever queue
1240 // it needs to be on.
1241
1242 // ASSERT(t->why_blocked != NotBlocked);
1243 // Not true: for example,
1244 // - in THREADED_RTS, the thread may already have been woken
1245 // up by another Capability. This actually happens: try
1246 // conc023 +RTS -N2.
1247 // - the thread may have woken itself up already, because
1248 // threadPaused() might have raised a blocked throwTo
1249 // exception, see maybePerformBlockedException().
1250
1251 #ifdef DEBUG
1252 traceThreadStatus(DEBUG_sched, t);
1253 #endif
1254 }
1255
1256 /* -----------------------------------------------------------------------------
1257 * Handle a thread that returned to the scheduler with ThreadFinished
1258 * -------------------------------------------------------------------------- */
1259
1260 static rtsBool
1261 scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
1262 {
1263 /* Need to check whether this was a main thread, and if so,
1264 * return with the return value.
1265 *
1266 * We also end up here if the thread kills itself with an
1267 * uncaught exception, see Exception.cmm.
1268 */
1269
1270 // blocked exceptions can now complete, even if the thread was in
1271 // blocked mode (see #2910). This unconditionally calls
1272 // lockTSO(), which ensures that we don't miss any threads that
1273 // are engaged in throwTo() with this thread as a target.
1274 awakenBlockedExceptionQueue (cap, t);
1275
1276 //
1277 // Check whether the thread that just completed was a bound
1278 // thread, and if so return with the result.
1279 //
1280 // There is an assumption here that all thread completion goes
1281 // through this point; we need to make sure that if a thread
1282 // ends up in the ThreadKilled state, that it stays on the run
1283 // queue so it can be dealt with here.
1284 //
1285
1286 if (t->bound) {
1287
1288 if (t->bound != task) {
1289 #if !defined(THREADED_RTS)
1290 // Must be a bound thread that is not the topmost one. Leave
1291 // it on the run queue until the stack has unwound to the
1292 // point where we can deal with this. Leaving it on the run
1293 // queue also ensures that the garbage collector knows about
1294 // this thread and its return value (it gets dropped from the
1295 // step->threads list so there's no other way to find it).
1296 appendToRunQueue(cap,t);
1297 return rtsFalse;
1298 #else
1299 // this cannot happen in the threaded RTS, because a
1300 // bound thread can only be run by the appropriate Task.
1301 barf("finished bound thread that isn't mine");
1302 #endif
1303 }
1304
1305 ASSERT(task->tso == t);
1306
1307 if (t->what_next == ThreadComplete) {
1308 if (task->ret) {
1309 // NOTE: return val is tso->sp[1] (see StgStartup.hc)
1310 *(task->ret) = (StgClosure *)task->tso->sp[1];
1311 }
1312 task->stat = Success;
1313 } else {
1314 if (task->ret) {
1315 *(task->ret) = NULL;
1316 }
1317 if (sched_state >= SCHED_INTERRUPTING) {
1318 if (heap_overflow) {
1319 task->stat = HeapExhausted;
1320 } else {
1321 task->stat = Interrupted;
1322 }
1323 } else {
1324 task->stat = Killed;
1325 }
1326 }
1327 #ifdef DEBUG
1328 removeThreadLabel((StgWord)task->tso->id);
1329 #endif
1330 return rtsTrue; // tells schedule() to return
1331 }
1332
1333 return rtsFalse;
1334 }
1335
1336 /* -----------------------------------------------------------------------------
1337 * Perform a heap census
1338 * -------------------------------------------------------------------------- */
1339
1340 static rtsBool
1341 scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
1342 {
1343 // When we have +RTS -i0 and we're heap profiling, do a census at
1344 // every GC. This lets us get repeatable runs for debugging.
1345 if (performHeapProfile ||
1346 (RtsFlags.ProfFlags.profileInterval==0 &&
1347 RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
1348 return rtsTrue;
1349 } else {
1350 return rtsFalse;
1351 }
1352 }
1353
1354 /* -----------------------------------------------------------------------------
1355 * Perform a garbage collection if necessary
1356 * -------------------------------------------------------------------------- */
1357
1358 static Capability *
1359 scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
1360 {
1361 rtsBool heap_census;
1362 #ifdef THREADED_RTS
1363 /* extern static volatile StgWord waiting_for_gc;
1364 lives inside capability.c */
1365 rtsBool gc_type, prev_pending_gc;
1366 nat i;
1367 #endif
1368
1369 if (sched_state == SCHED_SHUTTING_DOWN) {
1370 // The final GC has already been done, and the system is
1371 // shutting down. We'll probably deadlock if we try to GC
1372 // now.
1373 return cap;
1374 }
1375
1376 #ifdef THREADED_RTS
1377 if (sched_state < SCHED_INTERRUPTING
1378 && RtsFlags.ParFlags.parGcEnabled
1379 && N >= RtsFlags.ParFlags.parGcGen
1380 && ! oldest_gen->steps[0].mark)
1381 {
1382 gc_type = PENDING_GC_PAR;
1383 } else {
1384 gc_type = PENDING_GC_SEQ;
1385 }
1386
1387 // In order to GC, there must be no threads running Haskell code.
1388 // Therefore, the GC thread needs to hold *all* the capabilities,
1389 // and release them after the GC has completed.
1390 //
1391 // This seems to be the simplest way: previous attempts involved
1392 // making all the threads with capabilities give up their
1393 // capabilities and sleep except for the *last* one, which
1394 // actually did the GC. But it's quite hard to arrange for all
1395 // the other tasks to sleep and stay asleep.
1396 //
1397
1398 /* Other capabilities are prevented from running yet more Haskell
1399 threads if waiting_for_gc is set. Tested inside
1400 yieldCapability() and releaseCapability() in Capability.c */
1401
1402 prev_pending_gc = cas(&waiting_for_gc, 0, gc_type);
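// cas() returns the previous value of waiting_for_gc: zero means we won
// the race and are now responsible for initiating the GC; non-zero means
// another Capability already requested a GC (of type prev_pending_gc), so
// we just yield below until that GC has completed.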
1403 if (prev_pending_gc) {
1404 do {
1405 debugTrace(DEBUG_sched, "someone else is trying to GC (%d)...",
1406 prev_pending_gc);
1407 ASSERT(cap);
1408 yieldCapability(&cap,task);
1409 } while (waiting_for_gc);
1410 return cap; // NOTE: task->cap might have changed here
1411 }
1412
1413 setContextSwitches();
1414
1415 // The final shutdown GC is always single-threaded, because it's
1416 // possible that some of the Capabilities have no worker threads.
1417
1418 if (gc_type == PENDING_GC_SEQ)
1419 {
1420 traceSchedEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
1421 }
1422 else
1423 {
1424 traceSchedEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
1425 debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
1426 }
1427
1428 // do this while the other Capabilities stop:
1429 if (cap) scheduleCheckBlackHoles(cap);
1430
1431 if (gc_type == PENDING_GC_SEQ)
1432 {
1433 // single-threaded GC: grab all the capabilities
1434 for (i=0; i < n_capabilities; i++) {
1435 debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilities (%d/%d)", i, n_capabilities);
1436 if (cap != &capabilities[i]) {
1437 Capability *pcap = &capabilities[i];
1438 // we better hope this task doesn't get migrated to
1439 // another Capability while we're waiting for this one.
1440 // It won't, because load balancing happens while we have
1441 // all the Capabilities, but even so it's a slightly
1442 // unsavoury invariant.
1443 task->cap = pcap;
1444 waitForReturnCapability(&pcap, task);
1445 if (pcap != &capabilities[i]) {
1446 barf("scheduleDoGC: got the wrong capability");
1447 }
1448 }
1449 }
1450 }
1451 else
1452 {
1453 // multi-threaded GC: make sure all the Capabilities donate one
1454 // GC thread each.
1455 waitForGcThreads(cap);
1456 }
1457
1458 #else /* !THREADED_RTS */
1459
1460 // do this while the other Capabilities stop:
1461 if (cap) scheduleCheckBlackHoles(cap);
1462
1463 #endif
1464
1465 IF_DEBUG(scheduler, printAllThreads());
1466
1467 delete_threads_and_gc:
1468 /*
1469 * We now have all the capabilities; if we're in an interrupting
1470 * state, then we should take the opportunity to delete all the
1471 * threads in the system.
1472 */
1473 if (sched_state == SCHED_INTERRUPTING) {
1474 deleteAllThreads(cap);
1475 sched_state = SCHED_SHUTTING_DOWN;
1476 }
1477
1478 heap_census = scheduleNeedHeapProfile(rtsTrue);
1479
1480 #if defined(THREADED_RTS)
1481 traceSchedEvent(cap, EVENT_GC_START, 0, 0);
1482 // reset waiting_for_gc *before* GC, so that when the GC threads
1483 // emerge they don't immediately re-enter the GC.
1484 waiting_for_gc = 0;
1485 GarbageCollect(force_major || heap_census, gc_type, cap);
1486 #else
1487 GarbageCollect(force_major || heap_census, 0, cap);
1488 #endif
1489 traceSchedEvent(cap, EVENT_GC_END, 0, 0);
1490
1491 if (recent_activity == ACTIVITY_INACTIVE && force_major)
1492 {
1493 // We are doing a GC because the system has been idle for a
1494 // timeslice and we need to check for deadlock. Record the
1495 // fact that we've done a GC and turn off the timer signal;
1496 // it will get re-enabled if we run any threads after the GC.
1497 recent_activity = ACTIVITY_DONE_GC;
1498 stopTimer();
1499 }
1500 else
1501 {
1502 // the GC might have taken long enough for the timer to set
1503 // recent_activity = ACTIVITY_INACTIVE, but we aren't
1504 // necessarily deadlocked:
1505 recent_activity = ACTIVITY_YES;
1506 }
1507
1508 #if defined(THREADED_RTS)
1509 if (gc_type == PENDING_GC_PAR)
1510 {
1511 releaseGCThreads(cap);
1512 }
1513 #endif
1514
1515 if (heap_census) {
1516 debugTrace(DEBUG_sched, "performing heap census");
1517 heapCensus();
1518 performHeapProfile = rtsFalse;
1519 }
1520
1521 if (heap_overflow && sched_state < SCHED_INTERRUPTING) {
1522 // GC set the heap_overflow flag, so we should proceed with
1523 // an orderly shutdown now. Ultimately we want the main
1524 // thread to return to its caller with HeapExhausted, at which
1525 // point the caller should call hs_exit(). The first step is
1526 // to delete all the threads.
1527 //
1528 // Another way to do this would be to raise an exception in
1529 // the main thread, which we really should do because it gives
1530 // the program a chance to clean up. But how do we find the
1531 // main thread? It should presumably be the same one that
1532 // gets ^C exceptions, but that's all done on the Haskell side
1533 // (GHC.TopHandler).
1534 sched_state = SCHED_INTERRUPTING;
1535 goto delete_threads_and_gc;
1536 }
1537
1538 #ifdef SPARKBALANCE
1539 /* JB
1540 Once we are all together... this would be the place to balance all
1541 spark pools. No concurrent stealing or adding of new sparks can
1542 occur. Should be defined in Sparks.c. */
1543 balanceSparkPoolsCaps(n_capabilities, capabilities);
1544 #endif
1545
1546 #if defined(THREADED_RTS)
1547 if (gc_type == PENDING_GC_SEQ) {
1548 // release our stash of capabilities.
1549 for (i = 0; i < n_capabilities; i++) {
1550 if (cap != &capabilities[i]) {
1551 task->cap = &capabilities[i];
1552 releaseCapability(&capabilities[i]);
1553 }
1554 }
1555 }
1556 if (cap) {
1557 task->cap = cap;
1558 } else {
1559 task->cap = NULL;
1560 }
1561 #endif
1562
1563 return cap;
1564 }
1565
1566 /* ---------------------------------------------------------------------------
1567 * Singleton fork(). Do not copy any running threads.
1568 * ------------------------------------------------------------------------- */
1569
1570 pid_t
1571 forkProcess(HsStablePtr *entry
1572 #ifndef FORKPROCESS_PRIMOP_SUPPORTED
1573 STG_UNUSED
1574 #endif
1575 )
1576 {
1577 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
1578 Task *task;
1579 pid_t pid;
1580 StgTSO* t,*next;
1581 Capability *cap;
1582 nat s;
1583
1584 #if defined(THREADED_RTS)
1585 if (RtsFlags.ParFlags.nNodes > 1) {
1586 errorBelch("forking not supported with +RTS -N<n> greater than 1");
1587 stg_exit(EXIT_FAILURE);
1588 }
1589 #endif
1590
1591 debugTrace(DEBUG_sched, "forking!");
1592
1593 // ToDo: for SMP, we should probably acquire *all* the capabilities
1594 cap = rts_lock();
1595
1596 // no funny business: hold locks while we fork, otherwise if some
1597 // other thread is holding a lock when the fork happens, the data
1598 // structure protected by the lock will forever be in an
1599 // inconsistent state in the child. See also #1391.
1600 ACQUIRE_LOCK(&sched_mutex);
1601 ACQUIRE_LOCK(&cap->lock);
1602 ACQUIRE_LOCK(&cap->running_task->lock);
1603
1604 pid = fork();
1605
1606 if (pid) { // parent
1607
1608 RELEASE_LOCK(&sched_mutex);
1609 RELEASE_LOCK(&cap->lock);
1610 RELEASE_LOCK(&cap->running_task->lock);
1611
1612 // just return the pid
1613 rts_unlock(cap);
1614 return pid;
1615
1616 } else { // child
1617
1618 #if defined(THREADED_RTS)
1619 initMutex(&sched_mutex);
1620 initMutex(&cap->lock);
1621 initMutex(&cap->running_task->lock);
1622 #endif
1623
1624 // Now, all OS threads except the thread that forked are
1625 // stopped. We need to stop all Haskell threads, including
1626 // those involved in foreign calls. Also we need to delete
1627 // all Tasks, because they correspond to OS threads that are
1628 // now gone.
1629
1630 for (s = 0; s < total_steps; s++) {
1631 for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
1632 if (t->what_next == ThreadRelocated) {
1633 next = t->_link;
1634 } else {
1635 next = t->global_link;
1636 // don't allow threads to catch the ThreadKilled
1637 // exception, but we do want to raiseAsync() because these
1638 // threads may be evaluating thunks that we need later.
1639 deleteThread_(cap,t);
1640 }
1641 }
1642 }
1643
1644 // Empty the run queue. It seems tempting to let all the
1645 // killed threads stay on the run queue as zombies to be
1646 // cleaned up later, but some of them correspond to bound
1647 // threads for which the corresponding Task does not exist.
1648 cap->run_queue_hd = END_TSO_QUEUE;
1649 cap->run_queue_tl = END_TSO_QUEUE;
1650
1651 // Any suspended C-calling Tasks are no more, their OS threads
1652 // don't exist now:
1653 cap->suspended_ccalling_tasks = NULL;
1654
1655 // Empty the threads lists. Otherwise, the garbage
1656 // collector may attempt to resurrect some of these threads.
1657 for (s = 0; s < total_steps; s++) {
1658 all_steps[s].threads = END_TSO_QUEUE;
1659 }
1660
1661 // Wipe the task list, except the current Task.
1662 ACQUIRE_LOCK(&sched_mutex);
1663 for (task = all_tasks; task != NULL; task=task->all_link) {
1664 if (task != cap->running_task) {
1665 #if defined(THREADED_RTS)
1666 initMutex(&task->lock); // see #1391
1667 #endif
1668 discardTask(task);
1669 }
1670 }
1671 RELEASE_LOCK(&sched_mutex);
1672
1673 #if defined(THREADED_RTS)
1674 // Wipe our spare workers list, they no longer exist. New
1675 // workers will be created if necessary.
1676 cap->spare_workers = NULL;
1677 cap->returning_tasks_hd = NULL;
1678 cap->returning_tasks_tl = NULL;
1679 #endif
1680
1681 // On Unix, all timers are reset in the child, so we need to start
1682 // the timer again.
1683 initTimer();
1684 startTimer();
1685
1686 #if defined(THREADED_RTS)
1687 cap = ioManagerStartCap(cap);
1688 #endif
1689
1690 cap = rts_evalStableIO(cap, entry, NULL); // run the action
1691 rts_checkSchedStatus("forkProcess",cap);
1692
1693 rts_unlock(cap);
1694 hs_exit(); // clean up and exit
1695 stg_exit(EXIT_SUCCESS);
1696 }
1697 #else /* !FORKPROCESS_PRIMOP_SUPPORTED */
1698 barf("forkProcess#: primop not supported on this platform, sorry!\n");
1699 #endif
1700 }
1701
1702 /* ---------------------------------------------------------------------------
1703 * Delete all the threads in the system
1704 * ------------------------------------------------------------------------- */
1705
1706 static void
1707 deleteAllThreads ( Capability *cap )
1708 {
1709 // NOTE: only safe to call if we own all capabilities.
1710
1711 StgTSO* t, *next;
1712 nat s;
1713
1714 debugTrace(DEBUG_sched,"deleting all threads");
1715 for (s = 0; s < total_steps; s++) {
1716 for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
1717 if (t->what_next == ThreadRelocated) {
1718 next = t->_link;
1719 } else {
1720 next = t->global_link;
1721 deleteThread(cap,t);
1722 }
1723 }
1724 }
1725
1726 // The run queue now contains a bunch of ThreadKilled threads. We
1727 // must not throw these away: the main thread(s) will be in there
1728 // somewhere, and the main scheduler loop has to deal with it.
1729 // Also, the run queue is the only thing keeping these threads from
1730 // being GC'd, and we don't want the "main thread has been GC'd" panic.
1731
1732 #if !defined(THREADED_RTS)
1733 ASSERT(blocked_queue_hd == END_TSO_QUEUE);
1734 ASSERT(sleeping_queue == END_TSO_QUEUE);
1735 #endif
1736 }
1737
1738 /* -----------------------------------------------------------------------------
1739 Managing the suspended_ccalling_tasks list.
1740 Locks required: sched_mutex
1741 -------------------------------------------------------------------------- */
1742
1743 STATIC_INLINE void
1744 suspendTask (Capability *cap, Task *task)
1745 {
1746 ASSERT(task->next == NULL && task->prev == NULL);
1747 task->next = cap->suspended_ccalling_tasks;
1748 task->prev = NULL;
1749 if (cap->suspended_ccalling_tasks) {
1750 cap->suspended_ccalling_tasks->prev = task;
1751 }
1752 cap->suspended_ccalling_tasks = task;
1753 }
1754
1755 STATIC_INLINE void
1756 recoverSuspendedTask (Capability *cap, Task *task)
1757 {
1758 if (task->prev) {
1759 task->prev->next = task->next;
1760 } else {
1761 ASSERT(cap->suspended_ccalling_tasks == task);
1762 cap->suspended_ccalling_tasks = task->next;
1763 }
1764 if (task->next) {
1765 task->next->prev = task->prev;
1766 }
1767 task->next = task->prev = NULL;
1768 }
1769
1770 /* ---------------------------------------------------------------------------
1771 * Suspending & resuming Haskell threads.
1772 *
1773 * When making a "safe" call to C (aka _ccall_GC), the task gives back
1774 * its capability before calling the C function. This allows another
1775 * task to pick up the capability and carry on running Haskell
1776 * threads. It also means that if the C call blocks, it won't lock
1777 * the whole system.
1778 *
1779 * The Haskell thread making the C call is put to sleep for the
1780  * duration of the call, on the suspended_ccalling_tasks list. We
1781 * give out a token to the task, which it can use to resume the thread
1782 * on return from the C function.
1783 * ------------------------------------------------------------------------- */
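/* A rough sketch of how these two functions bracket a "safe" foreign
 * call (the real calls are emitted by the code generator; the C function
 * and its arguments below are purely illustrative):
 *
 *     void *token;
 *     token = suspendThread(&cap->r);        // give up the Capability
 *     r     = blocking_c_function(args);     // may block for a long time
 *     reg   = resumeThread(token);           // get a Capability back
 *
 * The token handed out by suspendThread() is in fact the Task, and must
 * be passed back unchanged to resumeThread(), which may hand us a
 * register table belonging to a different Capability than the one we
 * started on.
 */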
1784
1785 void *
1786 suspendThread (StgRegTable *reg)
1787 {
1788 Capability *cap;
1789 int saved_errno;
1790 StgTSO *tso;
1791 Task *task;
1792 #if mingw32_HOST_OS
1793 StgWord32 saved_winerror;
1794 #endif
1795
1796 saved_errno = errno;
1797 #if mingw32_HOST_OS
1798 saved_winerror = GetLastError();
1799 #endif
1800
1801 /* assume that *reg is a pointer to the StgRegTable part of a Capability.
1802 */
1803 cap = regTableToCapability(reg);
1804
1805 task = cap->running_task;
1806 tso = cap->r.rCurrentTSO;
1807
1808 traceSchedEvent(cap, EVENT_STOP_THREAD, tso, THREAD_SUSPENDED_FOREIGN_CALL);
1809
1810 // XXX this might not be necessary --SDM
1811 tso->what_next = ThreadRunGHC;
1812
1813 threadPaused(cap,tso);
1814
1815 if ((tso->flags & TSO_BLOCKEX) == 0) {
1816 tso->why_blocked = BlockedOnCCall;
1817 tso->flags |= TSO_BLOCKEX;
1818 tso->flags &= ~TSO_INTERRUPTIBLE;
1819 } else {
1820 tso->why_blocked = BlockedOnCCall_NoUnblockExc;
1821 }
1822
1823 // Hand back capability
1824 task->suspended_tso = tso;
1825
1826 ACQUIRE_LOCK(&cap->lock);
1827
1828 suspendTask(cap,task);
1829 cap->in_haskell = rtsFalse;
1830 releaseCapability_(cap,rtsFalse);
1831
1832 RELEASE_LOCK(&cap->lock);
1833
1834 errno = saved_errno;
1835 #if mingw32_HOST_OS
1836 SetLastError(saved_winerror);
1837 #endif
1838 return task;
1839 }
1840
1841 StgRegTable *
1842 resumeThread (void *task_)
1843 {
1844 StgTSO *tso;
1845 Capability *cap;
1846 Task *task = task_;
1847 int saved_errno;
1848 #if mingw32_HOST_OS
1849 StgWord32 saved_winerror;
1850 #endif
1851
1852 saved_errno = errno;
1853 #if mingw32_HOST_OS
1854 saved_winerror = GetLastError();
1855 #endif
1856
1857 cap = task->cap;
1858 // Wait for permission to re-enter the RTS with the result.
1859 waitForReturnCapability(&cap,task);
1860 // we might be on a different capability now... but if so, our
1861 // entry on the suspended_ccalling_tasks list will also have been
1862 // migrated.
1863
1864 // Remove the thread from the suspended list
1865 recoverSuspendedTask(cap,task);
1866
1867 tso = task->suspended_tso;
1868 task->suspended_tso = NULL;
1869 tso->_link = END_TSO_QUEUE; // no write barrier reqd
1870
1871 traceSchedEvent(cap, EVENT_RUN_THREAD, tso, tso->what_next);
1872
1873 if (tso->why_blocked == BlockedOnCCall) {
1874 // avoid locking the TSO if we don't have to
1875 if (tso->blocked_exceptions != END_TSO_QUEUE) {
1876 awakenBlockedExceptionQueue(cap,tso);
1877 }
1878 tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
1879 }
1880
1881 /* Reset blocking status */
1882 tso->why_blocked = NotBlocked;
1883
1884 cap->r.rCurrentTSO = tso;
1885 cap->in_haskell = rtsTrue;
1886 errno = saved_errno;
1887 #if mingw32_HOST_OS
1888 SetLastError(saved_winerror);
1889 #endif
1890
1891 /* We might have GC'd, mark the TSO dirty again */
1892 dirty_TSO(cap,tso);
1893
1894 IF_DEBUG(sanity, checkTSO(tso));
1895
1896 return &cap->r;
1897 }
1898
1899 /* ---------------------------------------------------------------------------
1900 * scheduleThread()
1901 *
1902 * scheduleThread puts a thread on the end of the runnable queue.
1903 * This will usually be done immediately after a thread is created.
1904 * The caller of scheduleThread must create the thread using e.g.
1905 * createThread and push an appropriate closure
1906 * on this thread's stack before the scheduler is invoked.
1907 * ------------------------------------------------------------------------ */
1908
1909 void
1910 scheduleThread(Capability *cap, StgTSO *tso)
1911 {
1912 // The thread goes at the *end* of the run-queue, to avoid possible
1913 // starvation of any threads already on the queue.
1914 appendToRunQueue(cap,tso);
1915 }
1916
1917 void
1918 scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
1919 {
1920 #if defined(THREADED_RTS)
1921 tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
1922 // move this thread from now on.
1923 cpu %= RtsFlags.ParFlags.nNodes;
1924 if (cpu == cap->no) {
1925 appendToRunQueue(cap,tso);
1926 } else {
1927 traceSchedEvent (cap, EVENT_MIGRATE_THREAD, tso, capabilities[cpu].no);
1928 wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
1929 }
1930 #else
1931 appendToRunQueue(cap,tso);
1932 #endif
1933 }
1934
1935 Capability *
1936 scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
1937 {
1938 Task *task;
1939
1940 // We already created/initialised the Task
1941 task = cap->running_task;
1942
1943 // This TSO is now a bound thread; make the Task and TSO
1944 // point to each other.
1945 tso->bound = task;
1946 tso->cap = cap;
1947
1948 task->tso = tso;
1949 task->ret = ret;
1950 task->stat = NoStatus;
1951
1952 appendToRunQueue(cap,tso);
1953
1954 debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id);
1955
1956 cap = schedule(cap,task);
1957
1958 ASSERT(task->stat != NoStatus);
1959 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
1960
1961 debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id);
1962 return cap;
1963 }
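/* A minimal sketch of how the RTS API drives scheduleWaitThread (the
 * real code lives in RtsAPI.c; createStrictIOThread and the
 * initialStkSize flag are quoted from memory rather than verbatim):
 *
 *     StgTSO *tso;
 *     HaskellObj ret;
 *
 *     tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
 *     cap = scheduleWaitThread(tso, &ret, cap);
 *
 * i.e. build a TSO whose stack will run the IO action p, then hand it to
 * scheduleWaitThread, which enters the scheduler and does not return
 * until the bound thread has finished (task->stat records how).
 */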
1964
1965 /* ----------------------------------------------------------------------------
1966 * Starting Tasks
1967 * ------------------------------------------------------------------------- */
1968
1969 #if defined(THREADED_RTS)
1970 void OSThreadProcAttr
1971 workerStart(Task *task)
1972 {
1973 Capability *cap;
1974
1975 // See startWorkerTask().
1976 ACQUIRE_LOCK(&task->lock);
1977 cap = task->cap;
1978 RELEASE_LOCK(&task->lock);
1979
1980 if (RtsFlags.ParFlags.setAffinity) {
1981 setThreadAffinity(cap->no, n_capabilities);
1982 }
1983
1984 // set the thread-local pointer to the Task:
1985 taskEnter(task);
1986
1987 // schedule() runs without a lock.
1988 cap = schedule(cap,task);
1989
1990 // On exit from schedule(), we have a Capability, but possibly not
1991 // the same one we started with.
1992
1993 // During shutdown, the requirement is that after all the
1994 // Capabilities are shut down, all workers that are shutting down
1995 // have finished workerTaskStop(). This is why we hold on to
1996 // cap->lock until we've finished workerTaskStop() below.
1997 //
1998 // There may be workers still involved in foreign calls; those
1999 // will just block in waitForReturnCapability() because the
2000 // Capability has been shut down.
2001 //
2002 ACQUIRE_LOCK(&cap->lock);
2003 releaseCapability_(cap,rtsFalse);
2004 workerTaskStop(task);
2005 RELEASE_LOCK(&cap->lock);
2006 }
2007 #endif
2008
2009 /* ---------------------------------------------------------------------------
2010 * initScheduler()
2011 *
2012 * Initialise the scheduler. This resets all the queues - if the
2013 * queues contained any threads, they'll be garbage collected at the
2014 * next pass.
2015 *
2016 * ------------------------------------------------------------------------ */
2017
2018 void
2019 initScheduler(void)
2020 {
2021 #if !defined(THREADED_RTS)
2022 blocked_queue_hd = END_TSO_QUEUE;
2023 blocked_queue_tl = END_TSO_QUEUE;
2024 sleeping_queue = END_TSO_QUEUE;
2025 #endif
2026
2027 blackhole_queue = END_TSO_QUEUE;
2028
2029 sched_state = SCHED_RUNNING;
2030 recent_activity = ACTIVITY_YES;
2031
2032 #if defined(THREADED_RTS)
2033   /* Initialise the mutex protecting the scheduler's
2034    * global state. */
2035 initMutex(&sched_mutex);
2036 #endif
2037
2038 ACQUIRE_LOCK(&sched_mutex);
2039
2040 /* A capability holds the state a native thread needs in
2041 * order to execute STG code. At least one capability is
2042 * floating around (only THREADED_RTS builds have more than one).
2043 */
2044 initCapabilities();
2045
2046 initTaskManager();
2047
2048 #if defined(THREADED_RTS)
2049 initSparkPools();
2050 #endif
2051
2052 #if defined(THREADED_RTS)
2053 /*
2054 * Eagerly start one worker to run each Capability, except for
2055 * Capability 0. The idea is that we're probably going to start a
2056 * bound thread on Capability 0 pretty soon, so we don't want a
2057 * worker task hogging it.
2058 */
2059 {
2060 nat i;
2061 Capability *cap;
2062 for (i = 1; i < n_capabilities; i++) {
2063 cap = &capabilities[i];
2064 ACQUIRE_LOCK(&cap->lock);
2065 startWorkerTask(cap, workerStart);
2066 RELEASE_LOCK(&cap->lock);
2067 }
2068 }
2069 #endif
2070
2071 RELEASE_LOCK(&sched_mutex);
2072 }
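/* initScheduler() is not called directly by clients; it runs as part of
 * hs_init().  A minimal embedding that exercises startup, evaluation and
 * shutdown looks roughly like this (mainAction_closure stands for some
 * foreign-exported Haskell IO action and is purely hypothetical):
 *
 *     #include "Rts.h"
 *
 *     int main (int argc, char *argv[])
 *     {
 *         Capability *cap;
 *         hs_init(&argc, &argv);            // initScheduler() runs in here
 *         cap = rts_lock();                 // grab a Capability
 *         cap = rts_evalIO(cap, mainAction_closure, NULL);
 *         rts_checkSchedStatus("main", cap);
 *         rts_unlock(cap);
 *         hs_exit();                        // exitScheduler()/freeScheduler()
 *         return 0;
 *     }
 */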
2073
2074 void
2075 exitScheduler(
2076 rtsBool wait_foreign
2077 #if !defined(THREADED_RTS)
2078 __attribute__((unused))
2079 #endif
2080 )
2081 /* see Capability.c, shutdownCapability() */
2082 {
2083 Task *task = NULL;
2084
2085 task = newBoundTask();
2086
2087 // If we haven't killed all the threads yet, do it now.
2088 if (sched_state < SCHED_SHUTTING_DOWN) {
2089 sched_state = SCHED_INTERRUPTING;
2090 waitForReturnCapability(&task->cap,task);
2091 scheduleDoGC(task->cap,task,rtsFalse);
2092 releaseCapability(task->cap);
2093 }
2094 sched_state = SCHED_SHUTTING_DOWN;
2095
2096 #if defined(THREADED_RTS)
2097 {
2098 nat i;
2099
2100 for (i = 0; i < n_capabilities; i++) {
2101 shutdownCapability(&capabilities[i], task, wait_foreign);
2102 }
2103 boundTaskExiting(task);
2104 }
2105 #endif
2106 }
2107
2108 void
2109 freeScheduler( void )
2110 {
2111 nat still_running;
2112
2113 ACQUIRE_LOCK(&sched_mutex);
2114 still_running = freeTaskManager();
2115 // We can only free the Capabilities if there are no Tasks still
2116 // running. We might have a Task about to return from a foreign
2117 // call into waitForReturnCapability(), for example (actually,
2118 // this should be the *only* thing that a still-running Task can
2119 // do at this point, and it will block waiting for the
2120 // Capability).
2121 if (still_running == 0) {
2122 freeCapabilities();
2123 if (n_capabilities != 1) {
2124 stgFree(capabilities);
2125 }
2126 }
2127 RELEASE_LOCK(&sched_mutex);
2128 #if defined(THREADED_RTS)
2129 closeMutex(&sched_mutex);
2130 #endif
2131 }
2132
2133 /* -----------------------------------------------------------------------------
2134 performGC
2135
2136 This is the interface to the garbage collector from Haskell land.
2137 We provide this so that external C code can allocate and garbage
2138 collect when called from Haskell via _ccall_GC.
2139 -------------------------------------------------------------------------- */
2140
2141 static void
2142 performGC_(rtsBool force_major)
2143 {
2144 Task *task;
2145
2146 // We must grab a new Task here, because the existing Task may be
2147 // associated with a particular Capability, and chained onto the
2148 // suspended_ccalling_tasks queue.
2149 task = newBoundTask();
2150
2151 waitForReturnCapability(&task->cap,task);
2152 scheduleDoGC(task->cap,task,force_major);
2153 releaseCapability(task->cap);
2154 boundTaskExiting(task);
2155 }
2156
2157 void
2158 performGC(void)
2159 {
2160 performGC_(rtsFalse);
2161 }
2162
2163 void
2164 performMajorGC(void)
2165 {
2166 performGC_(rtsTrue);
2167 }
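/* For example, foreign C code called from Haskell (so the RTS is already
 * up) can force a collection like this; the header name is the usual
 * public one, included here on trust:
 *
 *     #include "Rts.h"
 *
 *     void flush_and_collect (void)
 *     {
 *         performMajorGC();   // force a major (all-generation) collection
 *     }
 *
 * On the Haskell side the same entry points are reachable with a plain
 * "foreign import ccall" of performGC or performMajorGC.
 */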
2168
2169 /* -----------------------------------------------------------------------------
2170 Stack overflow
2171
2172 If the thread has reached its maximum stack size, then raise the
2173 StackOverflow exception in the offending thread. Otherwise
2174 relocate the TSO into a larger chunk of memory and adjust its stack
2175 size appropriately.
2176 -------------------------------------------------------------------------- */
2177
2178 static StgTSO *
2179 threadStackOverflow(Capability *cap, StgTSO *tso)
2180 {
2181 nat new_stack_size, stack_words;
2182 lnat new_tso_size;
2183 StgPtr new_sp;
2184 StgTSO *dest;
2185
2186 IF_DEBUG(sanity,checkTSO(tso));
2187
2188 // don't allow throwTo() to modify the blocked_exceptions queue
2189 // while we are moving the TSO:
2190 lockClosure((StgClosure *)tso);
2191
2192 if (tso->stack_size >= tso->max_stack_size
2193 && !(tso->flags & TSO_BLOCKEX)) {
2194 // NB. never raise a StackOverflow exception if the thread is
2195     // inside Control.Exception.block. It is impractical to protect
2196 // against stack overflow exceptions, since virtually anything
2197 // can raise one (even 'catch'), so this is the only sensible
2198 // thing to do here. See bug #767.
2199 //
2200
2201 if (tso->flags & TSO_SQUEEZED) {
2202 return tso;
2203 }
2204 // #3677: In a stack overflow situation, stack squeezing may
2205 // reduce the stack size, but we don't know whether it has been
2206 // reduced enough for the stack check to succeed if we try
2207 // again. Fortunately stack squeezing is idempotent, so all we
2208 // need to do is record whether *any* squeezing happened. If we
2209 // are at the stack's absolute -K limit, and stack squeezing
2210 // happened, then we try running the thread again. The
2211 // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
2212 // squeezing happened or not.
2213
2214 debugTrace(DEBUG_gc,
2215 "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
2216 (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size);
2217 IF_DEBUG(gc,
2218 /* If we're debugging, just print out the top of the stack */
2219 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2220 tso->sp+64)));
2221
2222 // Send this thread the StackOverflow exception
2223 unlockTSO(tso);
2224 throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
2225 return tso;
2226 }
2227
2228
2229 // We also want to avoid enlarging the stack if squeezing has
2230 // already released some of it. However, we don't want to get into
2231     // a pathological situation where a thread has a nearly full stack
2232 // (near its current limit, but not near the absolute -K limit),
2233 // keeps allocating a little bit, squeezing removes a little bit,
2234 // and then it runs again. So to avoid this, if we squeezed *and*
2235 // there is still less than BLOCK_SIZE_W words free, then we enlarge
2236 // the stack anyway.
2237 if ((tso->flags & TSO_SQUEEZED) &&
2238 ((W_)(tso->sp - tso->stack) >= BLOCK_SIZE_W)) {
2239 unlockTSO(tso);
2240 return tso;
2241 }
2242
2243 /* Try to double the current stack size. If that takes us over the
2244 * maximum stack size for this thread, then use the maximum instead
2245 * (that is, unless we're already at or over the max size and we
2246 * can't raise the StackOverflow exception (see above), in which
2247 * case just double the size). Finally round up so the TSO ends up as
2248 * a whole number of blocks.
2249 */
2250 if (tso->stack_size >= tso->max_stack_size) {
2251 new_stack_size = tso->stack_size * 2;
2252 } else {
2253 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
2254 }
2255 new_tso_size = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
2256 TSO_STRUCT_SIZE)/sizeof(W_);
2257 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
2258 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
2259
2260 debugTrace(DEBUG_sched,
2261 "increasing stack size from %ld words to %d.",
2262 (long)tso->stack_size, new_stack_size);
2263
2264 dest = (StgTSO *)allocateLocal(cap,new_tso_size);
2265 TICK_ALLOC_TSO(new_stack_size,0);
2266
2267 /* copy the TSO block and the old stack into the new area */
2268 memcpy(dest,tso,TSO_STRUCT_SIZE);
2269 stack_words = tso->stack + tso->stack_size - tso->sp;
2270 new_sp = (P_)dest + new_tso_size - stack_words;
2271 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
2272
2273 /* relocate the stack pointers... */
2274 dest->sp = new_sp;
2275 dest->stack_size = new_stack_size;
2276
2277 /* Mark the old TSO as relocated. We have to check for relocated
2278 * TSOs in the garbage collector and any primops that deal with TSOs.
2279 *
2280 * It's important to set the sp value to just beyond the end
2281 * of the stack, so we don't attempt to scavenge any part of the
2282 * dead TSO's stack.
2283 */
2284 tso->what_next = ThreadRelocated;
2285 setTSOLink(cap,tso,dest);
2286 tso->sp = (P_)&(tso->stack[tso->stack_size]);
2287 tso->why_blocked = NotBlocked;
2288
2289 unlockTSO(dest);
2290 unlockTSO(tso);
2291
2292 IF_DEBUG(sanity,checkTSO(dest));
2293 #if 0
2294 IF_DEBUG(scheduler,printTSO(dest));
2295 #endif
2296
2297 return dest;
2298 }
2299
2300 static StgTSO *
2301 threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
2302 {
2303 bdescr *bd, *new_bd;
2304 lnat free_w, tso_size_w;
2305 StgTSO *new_tso;
2306
2307 tso_size_w = tso_sizeW(tso);
2308
2309 if (tso_size_w < MBLOCK_SIZE_W ||
2310 // TSO is less than 2 mblocks (since the first mblock is
2311 // shorter than MBLOCK_SIZE_W)
2312 (tso_size_w - BLOCKS_PER_MBLOCK*BLOCK_SIZE_W) % MBLOCK_SIZE_W != 0 ||
2313 // or TSO is not a whole number of megablocks (ensuring
2314 // precondition of splitLargeBlock() below)
2315 (tso_size_w <= round_up_to_mblocks(RtsFlags.GcFlags.initialStkSize)) ||
2316 // or TSO is smaller than the minimum stack size (rounded up)
2317 (nat)(tso->stack + tso->stack_size - tso->sp) > tso->stack_size / 4)
2318 // or stack is using more than 1/4 of the available space
2319 {
2320 // then do nothing
2321 return tso;
2322 }
2323
2324 // don't allow throwTo() to modify the blocked_exceptions queue
2325 // while we are moving the TSO:
2326 lockClosure((StgClosure *)tso);
2327
2328 // this is the number of words we'll free
2329 free_w = round_to_mblocks(tso_size_w/2);
2330
2331 bd = Bdescr((StgPtr)tso);
2332 new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W);
2333 bd->free = bd->start + TSO_STRUCT_SIZEW;
2334
2335 new_tso = (StgTSO *)new_bd->start;
2336 memcpy(new_tso,tso,TSO_STRUCT_SIZE);
2337 new_tso->stack_size = new_bd->free - new_tso->stack;
2338
2339 debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
2340 (long)tso->id, tso_size_w, tso_sizeW(new_tso));
2341
2342 tso->what_next = ThreadRelocated;
2343 tso->_link = new_tso; // no write barrier reqd: same generation
2344
2345 // The TSO attached to this Task may have moved, so update the
2346 // pointer to it.
2347 if (task->tso == tso) {
2348 task->tso = new_tso;
2349 }
2350
2351 unlockTSO(new_tso);
2352 unlockTSO(tso);
2353
2354 IF_DEBUG(sanity,checkTSO(new_tso));
2355
2356 return new_tso;
2357 }
2358
2359 /* ---------------------------------------------------------------------------
2360 Interrupt execution
2361 - usually called inside a signal handler so it mustn't do anything fancy.
2362 ------------------------------------------------------------------------ */
2363
2364 void
2365 interruptStgRts(void)
2366 {
2367 sched_state = SCHED_INTERRUPTING;
2368 setContextSwitches();
2369 #if defined(THREADED_RTS)
2370 wakeUpRts();
2371 #endif
2372 }
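/* A signal handler might use it roughly as follows (an illustrative
 * sketch, not the RTS's own ^C handling, which lives elsewhere, e.g.
 * posix/Signals.c on Unix):
 *
 *     static void ctrlc_handler (int sig STG_UNUSED)
 *     {
 *         interruptStgRts();   // just sets flags and wakes the scheduler
 *     }
 *
 * installed with sigaction()/signal() at startup.  Anything heavier
 * happens later, inside the scheduler, once it notices that sched_state
 * has advanced to SCHED_INTERRUPTING.
 */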
2373
2374 /* -----------------------------------------------------------------------------
2375 Wake up the RTS
2376
2377 This function causes at least one OS thread to wake up and run the
2378 scheduler loop. It is invoked when the RTS might be deadlocked, or
2379 an external event has arrived that may need servicing (eg. a
2380 keyboard interrupt).
2381
2382 In the single-threaded RTS we don't do anything here; we only have
2383 one thread anyway, and the event that caused us to want to wake up
2384 will have interrupted any blocking system call in progress anyway.
2385 -------------------------------------------------------------------------- */
2386
2387 #if defined(THREADED_RTS)
2388 void wakeUpRts(void)
2389 {
2390 // This forces the IO Manager thread to wakeup, which will
2391 // in turn ensure that some OS thread wakes up and runs the
2392 // scheduler loop, which will cause a GC and deadlock check.
2393 ioManagerWakeup();
2394 }
2395 #endif
2396
2397 /* -----------------------------------------------------------------------------
2398 * checkBlackHoles()
2399 *
2400 * Check the blackhole_queue for threads that can be woken up. We do
2401 * this periodically: before every GC, and whenever the run queue is
2402 * empty.
2403 *
2404 * An elegant solution might be to just wake up all the blocked
2405 * threads with awakenBlockedQueue occasionally: they'll go back to
2406 * sleep again if the object is still a BLACKHOLE. Unfortunately this
2407 * doesn't give us a way to tell whether we've actually managed to
2408 * wake up any threads, so we would be busy-waiting.
2409 *
2410 * -------------------------------------------------------------------------- */
2411
2412 static rtsBool
2413 checkBlackHoles (Capability *cap)
2414 {
2415 StgTSO **prev, *t;
2416 rtsBool any_woke_up = rtsFalse;
2417 StgHalfWord type;
2418
2419 // blackhole_queue is global:
2420 ASSERT_LOCK_HELD(&sched_mutex);
2421
2422 debugTrace(DEBUG_sched, "checking threads blocked on black holes");
2423
2424 // ASSUMES: sched_mutex
2425 prev = &blackhole_queue;
2426 t = blackhole_queue;
2427 while (t != END_TSO_QUEUE) {
2428 if (t->what_next == ThreadRelocated) {
2429 t = t->_link;
2430 continue;
2431 }
2432 ASSERT(t->why_blocked == BlockedOnBlackHole);
2433 type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
2434 if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
2435 IF_DEBUG(sanity,checkTSO(t));
2436 t = unblockOne(cap, t);
2437 *prev = t;
2438 any_woke_up = rtsTrue;
2439 } else {
2440 prev = &t->_link;
2441 t = t->_link;
2442 }
2443 }
2444
2445 return any_woke_up;
2446 }
2447
2448 /* -----------------------------------------------------------------------------
2449 Deleting threads
2450
2451 This is used for interruption (^C) and forking, and corresponds to
2452 raising an exception but without letting the thread catch the
2453 exception.
2454 -------------------------------------------------------------------------- */
2455
2456 static void
2457 deleteThread (Capability *cap, StgTSO *tso)
2458 {
2459 // NOTE: must only be called on a TSO that we have exclusive
2460 // access to, because we will call throwToSingleThreaded() below.
2461 // The TSO must be on the run queue of the Capability we own, or
2462 // we must own all Capabilities.
2463
2464 if (tso->why_blocked != BlockedOnCCall &&
2465 tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
2466 throwToSingleThreaded(cap,tso,NULL);
2467 }
2468 }
2469
2470 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
2471 static void
2472 deleteThread_(Capability *cap, StgTSO *tso)
2473 { // for forkProcess only:
2474 // like deleteThread(), but we delete threads in foreign calls, too.
2475
2476 if (tso->why_blocked == BlockedOnCCall ||
2477 tso->why_blocked == BlockedOnCCall_NoUnblockExc) {
2478 unblockOne(cap,tso);
2479 tso->what_next = ThreadKilled;
2480 } else {
2481 deleteThread(cap,tso);
2482 }
2483 }
2484 #endif
2485
2486 /* -----------------------------------------------------------------------------
2487 raiseExceptionHelper
2488
2489    This function is called by the raise# primitive, just so that we can
2490 move some of the tricky bits of raising an exception from C-- into
2491    C. Who knows, it might be a useful, reusable thing here too.
2492 -------------------------------------------------------------------------- */
2493
2494 StgWord
2495 raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
2496 {
2497 Capability *cap = regTableToCapability(reg);
2498 StgThunk *raise_closure = NULL;
2499 StgPtr p, next;
2500 StgRetInfoTable *info;
2501 //
2502 // This closure represents the expression 'raise# E' where E
2503     // is the exception being raised. It is used to overwrite all the
2504     // thunks which are currently under evaluation.
2505 //
2506
2507 // OLD COMMENT (we don't have MIN_UPD_SIZE now):
2508 // LDV profiling: stg_raise_info has THUNK as its closure
2509 // type. Since a THUNK takes at least MIN_UPD_SIZE words in its
2510     // payload, MIN_UPD_SIZE is more appropriate than 1. It seems that
2511 // 1 does not cause any problem unless profiling is performed.
2512 // However, when LDV profiling goes on, we need to linearly scan
2513 // small object pool, where raise_closure is stored, so we should
2514 // use MIN_UPD_SIZE.
2515 //
2516 // raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
2517 // sizeofW(StgClosure)+1);
2518 //
2519
2520 //
2521 // Walk up the stack, looking for the catch frame. On the way,
2522 // we update any closures pointed to from update frames with the
2523 // raise closure that we just built.
2524 //
2525 p = tso->sp;
2526 while(1) {
2527 info = get_ret_itbl((StgClosure *)p);
2528 next = p + stack_frame_sizeW((StgClosure *)p);
2529 switch (info->i.type) {
2530
2531 case UPDATE_FRAME:
2532 // Only create raise_closure if we need to.
2533 if (raise_closure == NULL) {
2534 raise_closure =
2535 (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
2536 SET_HDR(raise_closure, &stg_raise_info, CCCS);
2537 raise_closure->payload[0] = exception;
2538 }
2539 UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
2540 p = next;
2541 continue;
2542
2543 case ATOMICALLY_FRAME:
2544 debugTrace(DEBUG_stm, "found ATOMICALLY_FRAME at %p", p);
2545 tso->sp = p;
2546 return ATOMICALLY_FRAME;
2547
2548 case CATCH_FRAME:
2549 tso->sp = p;
2550 return CATCH_FRAME;
2551
2552 case CATCH_STM_FRAME:
2553 debugTrace(DEBUG_stm, "found CATCH_STM_FRAME at %p", p);
2554 tso->sp = p;
2555 return CATCH_STM_FRAME;
2556
2557 case STOP_FRAME:
2558 tso->sp = p;
2559 return STOP_FRAME;
2560
2561 case CATCH_RETRY_FRAME:
2562 default:
2563 p = next;
2564 continue;
2565 }
2566 }
2567 }
2568
2569
2570 /* -----------------------------------------------------------------------------
2571 findRetryFrameHelper
2572
2573 This function is called by the retry# primitive. It traverses the stack
2574 leaving tso->sp referring to the frame which should handle the retry.
2575
2576 This should either be a CATCH_RETRY_FRAME (if the retry# is within an orElse#)
2577    or should be an ATOMICALLY_FRAME (if the retry# reaches the top level).
2578
2579 We skip CATCH_STM_FRAMEs (aborting and rolling back the nested tx that they
2580 create) because retries are not considered to be exceptions, despite the
2581 similar implementation.
2582
2583 We should not expect to see CATCH_FRAME or STOP_FRAME because those should
2584 not be created within memory transactions.
2585 -------------------------------------------------------------------------- */
2586
2587 StgWord
2588 findRetryFrameHelper (StgTSO *tso)
2589 {
2590 StgPtr p, next;
2591 StgRetInfoTable *info;
2592
2593 p = tso -> sp;
2594 while (1) {
2595 info = get_ret_itbl((StgClosure *)p);
2596 next = p + stack_frame_sizeW((StgClosure *)p);
2597 switch (info->i.type) {
2598
2599 case ATOMICALLY_FRAME:
2600 debugTrace(DEBUG_stm,
2601 "found ATOMICALLY_FRAME at %p during retry", p);
2602 tso->sp = p;
2603 return ATOMICALLY_FRAME;
2604
2605 case CATCH_RETRY_FRAME:
2606 debugTrace(DEBUG_stm,
2607                        "found CATCH_RETRY_FRAME at %p during retry", p);
2608 tso->sp = p;
2609 return CATCH_RETRY_FRAME;
2610
2611 case CATCH_STM_FRAME: {
2612 StgTRecHeader *trec = tso -> trec;
2613 StgTRecHeader *outer = trec -> enclosing_trec;
2614 debugTrace(DEBUG_stm,
2615 "found CATCH_STM_FRAME at %p during retry", p);
2616 debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer);
2617 stmAbortTransaction(tso -> cap, trec);
2618 stmFreeAbortedTRec(tso -> cap, trec);
2619 tso -> trec = outer;
2620 p = next;
2621 continue;
2622 }
2623
2624
2625 default:
2626 ASSERT(info->i.type != CATCH_FRAME);
2627 ASSERT(info->i.type != STOP_FRAME);
2628 p = next;
2629 continue;
2630 }
2631 }
2632 }
2633
2634 /* -----------------------------------------------------------------------------
2635 resurrectThreads is called after garbage collection on the list of
2636 threads found to be garbage. Each of these threads will be woken
2637 up and sent a signal: BlockedOnDeadMVar if the thread was blocked
2638 on an MVar, or NonTermination if the thread was blocked on a Black
2639 Hole.
2640
2641 Locks: assumes we hold *all* the capabilities.
2642 -------------------------------------------------------------------------- */
2643
2644 void
2645 resurrectThreads (StgTSO *threads)
2646 {
2647 StgTSO *tso, *next;
2648 Capability *cap;
2649 step *step;
2650
2651 for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
2652 next = tso->global_link;
2653
2654 step = Bdescr((P_)tso)->step;
2655 tso->global_link = step->threads;
2656 step->threads = tso;
2657
2658 debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id);
2659
2660 // Wake up the thread on the Capability it was last on
2661 cap = tso->cap;
2662
2663 switch (tso->why_blocked) {
2664 case BlockedOnMVar:
2665 /* Called by GC - sched_mutex lock is currently held. */
2666 throwToSingleThreaded(cap, tso,
2667 (StgClosure *)blockedIndefinitelyOnMVar_closure);
2668 break;
2669 case BlockedOnBlackHole:
2670 throwToSingleThreaded(cap, tso,
2671 (StgClosure *)nonTermination_closure);
2672 break;
2673 case BlockedOnSTM:
2674 throwToSingleThreaded(cap, tso,
2675 (StgClosure *)blockedIndefinitelyOnSTM_closure);
2676 break;
2677 case NotBlocked:
2678 /* This might happen if the thread was blocked on a black hole
2679 * belonging to a thread that we've just woken up (raiseAsync
2680 * can wake up threads, remember...).
2681 */
2682 continue;
2683 case BlockedOnException:
2684 // throwTo should never block indefinitely: if the target
2685 // thread dies or completes, throwTo returns.
2686 barf("resurrectThreads: thread BlockedOnException");
2687 break;
2688 default:
2689 barf("resurrectThreads: thread blocked in a strange way");
2690 }
2691 }
2692 }
2693
2694 /* -----------------------------------------------------------------------------
2695 performPendingThrowTos is called after garbage collection, and
2696 passed a list of threads that were found to have pending throwTos
2697 (tso->blocked_exceptions was not empty), and were blocked.
2698 Normally this doesn't happen, because we would deliver the
2699 exception directly if the target thread is blocked, but there are
2700 small windows where it might occur on a multiprocessor (see
2701 throwTo()).
2702
2703 NB. we must be holding all the capabilities at this point, just
2704 like resurrectThreads().
2705 -------------------------------------------------------------------------- */
2706
2707 void
2708 performPendingThrowTos (StgTSO *threads)
2709 {
2710 StgTSO *tso, *next;
2711 Capability *cap;
2712   Task *task, *saved_task;
2713 step *step;
2714
2715 task = myTask();
2716 cap = task->cap;
2717
2718 for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
2719 next = tso->global_link;
2720
2721 step = Bdescr((P_)tso)->step;
2722 tso->global_link = step->threads;
2723 step->threads = tso;
2724
2725 debugTrace(DEBUG_sched, "performing blocked throwTo to thread %lu", (unsigned long)tso->id);
2726
2727 // We must pretend this Capability belongs to the current Task
2728 // for the time being, as invariants will be broken otherwise.
2729     // In fact the current Task has exclusive access to the system
2730 // at this point, so this is just bookkeeping:
2731 task->cap = tso->cap;
2732 saved_task = tso->cap->running_task;
2733 tso->cap->running_task = task;
2734 maybePerformBlockedException(tso->cap, tso);
2735 tso->cap->running_task = saved_task;
2736 }
2737
2738 // Restore our original Capability:
2739 task->cap = cap;
2740 }