1 /* ---------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2006
4 *
5 * The scheduler and thread-related functionality
6 *
7 * --------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #define KEEP_LOCKCLOSURE
11 #include "Rts.h"
12
13 #include "sm/Storage.h"
14 #include "RtsUtils.h"
15 #include "StgRun.h"
16 #include "Schedule.h"
17 #include "Interpreter.h"
18 #include "Printer.h"
19 #include "RtsSignals.h"
20 #include "Sanity.h"
21 #include "Stats.h"
22 #include "STM.h"
23 #include "Prelude.h"
24 #include "ThreadLabels.h"
25 #include "Updates.h"
26 #include "Proftimer.h"
27 #include "ProfHeap.h"
28 #include "Weak.h"
29 #include "eventlog/EventLog.h"
30 #include "sm/GC.h" // waitForGcThreads, releaseGCThreads, N
31 #include "Sparks.h"
32 #include "Capability.h"
33 #include "Task.h"
34 #include "AwaitEvent.h"
35 #if defined(mingw32_HOST_OS)
36 #include "win32/IOManager.h"
37 #endif
38 #include "Trace.h"
39 #include "RaiseAsync.h"
40 #include "Threads.h"
41 #include "Timer.h"
42 #include "ThreadPaused.h"
43
44 #ifdef HAVE_SYS_TYPES_H
45 #include <sys/types.h>
46 #endif
47 #ifdef HAVE_UNISTD_H
48 #include <unistd.h>
49 #endif
50
51 #include <string.h>
52 #include <stdlib.h>
53 #include <stdarg.h>
54
55 #ifdef HAVE_ERRNO_H
56 #include <errno.h>
57 #endif
58
59 /* -----------------------------------------------------------------------------
60 * Global variables
61 * -------------------------------------------------------------------------- */
62
63 #if !defined(THREADED_RTS)
64 // Blocked/sleeping threads
65 StgTSO *blocked_queue_hd = NULL;
66 StgTSO *blocked_queue_tl = NULL;
67 StgTSO *sleeping_queue = NULL; // perhaps replace with a hash table?
68 #endif
69
70 /* Threads blocked on blackholes.
71 * LOCK: sched_mutex+capability, or all capabilities
72 */
73 StgTSO *blackhole_queue = NULL;
74
75 /* The blackhole_queue should be checked for threads to wake up. See
76 * Schedule.h for more thorough comment.
77 * LOCK: none (doesn't matter if we miss an update)
78 */
79 rtsBool blackholes_need_checking = rtsFalse;
80
81 /* Set to true when the latest garbage collection failed to reclaim
82 * enough space, and the runtime should proceed to shut itself down in
83 * an orderly fashion (emitting profiling info etc.)
84 */
85 rtsBool heap_overflow = rtsFalse;
86
87 /* flag that tracks whether we have done any execution in this time slice.
88 * LOCK: currently none, perhaps we should lock (but needs to be
89 * updated in the fast path of the scheduler).
90 *
91 * NB. must be StgWord, we do xchg() on it.
92 */
93 volatile StgWord recent_activity = ACTIVITY_YES;
94
95 /* if this flag is set as well, give up execution
96 * LOCK: none (changes monotonically)
97 */
98 volatile StgWord sched_state = SCHED_RUNNING;
99
100 /* This is used in `TSO.h' and gcc 2.96 insists that this variable actually
101 * exists - earlier gccs apparently didn't.
102 * -= chak
103 */
104 StgTSO dummy_tso;
105
106 /*
107 * Set to TRUE when entering a shutdown state (via shutdownHaskellAndExit()) --
108 * in an MT setting, needed to signal that a worker thread shouldn't hang around
109 * in the scheduler when it is out of work.
110 */
111 rtsBool shutting_down_scheduler = rtsFalse;
112
113 /*
114 * This mutex protects most of the global scheduler data in
115 * the THREADED_RTS runtime.
116 */
117 #if defined(THREADED_RTS)
118 Mutex sched_mutex;
119 #endif
120
121 #if !defined(mingw32_HOST_OS)
122 #define FORKPROCESS_PRIMOP_SUPPORTED
123 #endif
124
125 /* -----------------------------------------------------------------------------
126 * static function prototypes
127 * -------------------------------------------------------------------------- */
128
129 static Capability *schedule (Capability *initialCapability, Task *task);
130
131 //
132 // These functions all encapsulate parts of the scheduler loop, and are
133 // abstracted only to make the structure and control flow of the
134 // scheduler clearer.
135 //
136 static void schedulePreLoop (void);
137 static void scheduleFindWork (Capability *cap);
138 #if defined(THREADED_RTS)
139 static void scheduleYield (Capability **pcap, Task *task, rtsBool);
140 #endif
141 static void scheduleStartSignalHandlers (Capability *cap);
142 static void scheduleCheckBlockedThreads (Capability *cap);
143 static void scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS);
144 static void scheduleCheckBlackHoles (Capability *cap);
145 static void scheduleDetectDeadlock (Capability *cap, Task *task);
146 static void schedulePushWork(Capability *cap, Task *task);
147 #if defined(THREADED_RTS)
148 static void scheduleActivateSpark(Capability *cap);
149 #endif
150 static void schedulePostRunThread(Capability *cap, StgTSO *t);
151 static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
152 static void scheduleHandleStackOverflow( Capability *cap, Task *task,
153 StgTSO *t);
154 static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t,
155 nat prev_what_next );
156 static void scheduleHandleThreadBlocked( StgTSO *t );
157 static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
158 StgTSO *t );
159 static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc);
160 static Capability *scheduleDoGC(Capability *cap, Task *task,
161 rtsBool force_major);
162
163 static rtsBool checkBlackHoles(Capability *cap);
164
165 static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
166 static StgTSO *threadStackUnderflow(Task *task, StgTSO *tso);
167
168 static void deleteThread (Capability *cap, StgTSO *tso);
169 static void deleteAllThreads (Capability *cap);
170
171 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
172 static void deleteThread_(Capability *cap, StgTSO *tso);
173 #endif
174
175 #ifdef DEBUG
176 static char *whatNext_strs[] = {
177 [0] = "(unknown)",
178 [ThreadRunGHC] = "ThreadRunGHC",
179 [ThreadInterpret] = "ThreadInterpret",
180 [ThreadKilled] = "ThreadKilled",
181 [ThreadRelocated] = "ThreadRelocated",
182 [ThreadComplete] = "ThreadComplete"
183 };
184 #endif
185
186 /* -----------------------------------------------------------------------------
187 * Putting a thread on the run queue: different scheduling policies
188 * -------------------------------------------------------------------------- */
189
190 STATIC_INLINE void
191 addToRunQueue( Capability *cap, StgTSO *t )
192 {
193 // this does round-robin scheduling; good for concurrency
194 appendToRunQueue(cap,t);
195 }
196
197 /* ---------------------------------------------------------------------------
198 Main scheduling loop.
199
200 We use round-robin scheduling, each thread returning to the
201 scheduler loop when one of these conditions is detected:
202
203 * out of heap space
204 * timer expires (thread yields)
205 * thread blocks
206 * thread ends
207 * stack overflow
208
209 GRAN version:
210 In a GranSim setup this loop iterates over the global event queue.
211 This revolves around the global event queue, which determines what
212 to do next. Therefore, it's more complicated than either the
213 concurrent or the parallel (GUM) setup.
214 This version has been entirely removed (JB 2008/08).
215
216 GUM version:
217 GUM iterates over incoming messages.
218 It starts with nothing to do (thus CurrentTSO == END_TSO_QUEUE),
219 and sends out a fish whenever it has nothing to do; in-between
220 doing the actual reductions (shared code below) it processes the
221 incoming messages and deals with delayed operations
222 (see PendingFetches).
223 This is not the ugliest code you could imagine, but it's bloody close.
224
225 (JB 2008/08) This version was formerly indicated by a PP-Flag PAR,
226 now by PP-flag PARALLEL_HASKELL. The Eden RTS (in GHC-6.x) uses it,
227 as well as future GUM versions. This file has been refurbished to
228 only contain valid code, which is however incomplete and refers to
229 invalid includes etc.
230
231 ------------------------------------------------------------------------ */
232
233 static Capability *
234 schedule (Capability *initialCapability, Task *task)
235 {
236 StgTSO *t;
237 Capability *cap;
238 StgThreadReturnCode ret;
239 nat prev_what_next;
240 rtsBool ready_to_gc;
241 #if defined(THREADED_RTS)
242 rtsBool first = rtsTrue;
243 rtsBool force_yield = rtsFalse;
244 #endif
245
246 cap = initialCapability;
247
248 // Pre-condition: this task owns initialCapability.
249 // The sched_mutex is *NOT* held
250 // NB. on return, we still hold a capability.
251
252 debugTrace (DEBUG_sched,
253 "### NEW SCHEDULER LOOP (task: %p, cap: %p)",
254 task, initialCapability);
255
256 schedulePreLoop();
257
258 // -----------------------------------------------------------
259 // Scheduler loop starts here:
260
261 while (1) {
262
263 // Check whether we have re-entered the RTS from Haskell without
264 // going via suspendThread()/resumeThread (i.e. a 'safe' foreign
265 // call).
266 if (cap->in_haskell) {
267 errorBelch("schedule: re-entered unsafely.\n"
268 " Perhaps a 'foreign import unsafe' should be 'safe'?");
269 stg_exit(EXIT_FAILURE);
270 }
271
272 // The interruption / shutdown sequence.
273 //
274 // In order to cleanly shut down the runtime, we want to:
275 // * make sure that all main threads return to their callers
276 // with the state 'Interrupted'.
277 // * clean up all OS threads associated with the runtime
278 // * free all memory etc.
279 //
280 // So the sequence for ^C goes like this:
281 //
282 // * ^C handler sets sched_state := SCHED_INTERRUPTING and
283 // arranges for some Capability to wake up
284 //
285 // * all threads in the system are halted, and the zombies are
286 // placed on the run queue for cleaning up. We acquire all
287 // the capabilities in order to delete the threads, this is
288 // done by scheduleDoGC() for convenience (because GC already
289 // needs to acquire all the capabilities). We can't kill
290 // threads involved in foreign calls.
291 //
292 // * somebody calls shutdownHaskell(), which calls exitScheduler()
293 //
294 // * sched_state := SCHED_SHUTTING_DOWN
295 //
296 // * all workers exit when the run queue on their capability
297 // drains. All main threads will also exit when their TSO
298 // reaches the head of the run queue and they can return.
299 //
300 // * eventually all Capabilities will shut down, and the RTS can
301 // exit.
302 //
303 // * We might be left with threads blocked in foreign calls,
304 // we should really attempt to kill these somehow (TODO);
305
306 switch (sched_state) {
307 case SCHED_RUNNING:
308 break;
309 case SCHED_INTERRUPTING:
310 debugTrace(DEBUG_sched, "SCHED_INTERRUPTING");
311 #if defined(THREADED_RTS)
312 discardSparksCap(cap);
313 #endif
314 /* scheduleDoGC() deletes all the threads */
315 cap = scheduleDoGC(cap,task,rtsFalse);
316
317 // after scheduleDoGC(), we must be shutting down. Either some
318 // other Capability did the final GC, or we did it above,
319 // either way we can fall through to the SCHED_SHUTTING_DOWN
320 // case now.
321 ASSERT(sched_state == SCHED_SHUTTING_DOWN);
322 // fall through
323
324 case SCHED_SHUTTING_DOWN:
325 debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN");
326 // If we are a worker, just exit. If we're a bound thread
327 // then we will exit below when we've removed our TSO from
328 // the run queue.
329 if (task->tso == NULL && emptyRunQueue(cap)) {
330 return cap;
331 }
332 break;
333 default:
334 barf("sched_state: %d", sched_state);
335 }
336
337 scheduleFindWork(cap);
338
339 /* work pushing, currently relevant only for THREADED_RTS:
340 (pushes threads, wakes up idle capabilities for stealing) */
341 schedulePushWork(cap,task);
342
343 scheduleDetectDeadlock(cap,task);
344
345 #if defined(THREADED_RTS)
346 cap = task->cap; // reload cap, it might have changed
347 #endif
348
349 // Normally, the only way we can get here with no threads to
350 // run is if a keyboard interrupt was received during
351 // scheduleCheckBlockedThreads() or scheduleDetectDeadlock().
352 // Additionally, it is not fatal for the
353 // threaded RTS to reach here with no threads to run.
354 //
355 // win32: might be here due to awaitEvent() being abandoned
356 // as a result of a console event having been delivered.
357
358 #if defined(THREADED_RTS)
359 if (first)
360 {
361 // XXX: ToDo
362 // // don't yield the first time, we want a chance to run this
363 // // thread for a bit, even if there are others banging at the
364 // // door.
365 // first = rtsFalse;
366 // ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
367 }
368
369 yield:
370 scheduleYield(&cap,task,force_yield);
371 force_yield = rtsFalse;
372
373 if (emptyRunQueue(cap)) continue; // look for work again
374 #endif
375
376 #if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
377 if ( emptyRunQueue(cap) ) {
378 ASSERT(sched_state >= SCHED_INTERRUPTING);
379 }
380 #endif
381
382 //
383 // Get a thread to run
384 //
385 t = popRunQueue(cap);
386
387 // Sanity check the thread we're about to run. This can be
388 // expensive if there is lots of thread switching going on...
389 IF_DEBUG(sanity,checkTSO(t));
390
391 #if defined(THREADED_RTS)
392 // Check whether we can run this thread in the current task.
393 // If not, we have to pass our capability to the right task.
394 {
395 Task *bound = t->bound;
396
397 if (bound) {
398 if (bound == task) {
399 debugTrace(DEBUG_sched,
400 "### Running thread %lu in bound thread", (unsigned long)t->id);
401 // yes, the Haskell thread is bound to the current native thread
402 } else {
403 debugTrace(DEBUG_sched,
404 "### thread %lu bound to another OS thread", (unsigned long)t->id);
405 // no, bound to a different Haskell thread: pass to that thread
406 pushOnRunQueue(cap,t);
407 continue;
408 }
409 } else {
410 // The thread we want to run is unbound.
411 if (task->tso) {
412 debugTrace(DEBUG_sched,
413 "### this OS thread cannot run thread %lu", (unsigned long)t->id);
414 // no, the current native thread is bound to a different
415 // Haskell thread, so pass it to any worker thread
416 pushOnRunQueue(cap,t);
417 continue;
418 }
419 }
420 }
421 #endif
422
423 // If we're shutting down, and this thread has not yet been
424 // killed, kill it now. This sometimes happens when a finalizer
425 // thread is created by the final GC, or a thread previously
426 // in a foreign call returns.
427 if (sched_state >= SCHED_INTERRUPTING &&
428 !(t->what_next == ThreadComplete || t->what_next == ThreadKilled)) {
429 deleteThread(cap,t);
430 }
431
432 /* context switches are initiated by the timer signal, unless
433 * the user specified "context switch as often as possible", with
434 * +RTS -C0
435 */
436 if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
437 && !emptyThreadQueues(cap)) {
438 cap->context_switch = 1;
439 }
440
441 run_thread:
442
443 // CurrentTSO is the thread to run. t might be different if we
444 // loop back to run_thread, so make sure to set CurrentTSO after
445 // that.
446 cap->r.rCurrentTSO = t;
447
448 debugTrace(DEBUG_sched, "-->> running thread %ld %s ...",
449 (long)t->id, whatNext_strs[t->what_next]);
450
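// Start the heap-profiling timer for the duration of this thread's run;
// it is stopped again below once the thread has returned to the scheduler.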
451 startHeapProfTimer();
452
453 // Check for exceptions blocked on this thread
454 maybePerformBlockedException (cap, t);
455
456 // ----------------------------------------------------------------------
457 // Run the current thread
458
459 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
460 ASSERT(t->cap == cap);
461 ASSERT(t->bound ? t->bound->cap == cap : 1);
462
463 prev_what_next = t->what_next;
464
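// Restore the errno (and, on Windows, the last error code) that this
// thread saved when it last stopped; they are saved back into the TSO
// below after the thread returns.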
465 errno = t->saved_errno;
466 #if mingw32_HOST_OS
467 SetLastError(t->saved_winerror);
468 #endif
469
470 cap->in_haskell = rtsTrue;
471
472 dirty_TSO(cap,t);
473
474 #if defined(THREADED_RTS)
475 if (recent_activity == ACTIVITY_DONE_GC) {
476 // ACTIVITY_DONE_GC means we turned off the timer signal to
477 // conserve power (see #1623). Re-enable it here.
478 nat prev;
479 prev = xchg((P_)&recent_activity, ACTIVITY_YES);
480 if (prev == ACTIVITY_DONE_GC) {
481 startTimer();
482 }
483 } else {
484 recent_activity = ACTIVITY_YES;
485 }
486 #endif
487
488 postEvent(cap, EVENT_RUN_THREAD, t->id, 0);
489
490 switch (prev_what_next) {
491
492 case ThreadKilled:
493 case ThreadComplete:
494 /* Thread already finished, return to scheduler. */
495 ret = ThreadFinished;
496 break;
497
498 case ThreadRunGHC:
499 {
500 StgRegTable *r;
501 r = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
502 cap = regTableToCapability(r);
503 ret = r->rRet;
504 break;
505 }
506
507 case ThreadInterpret:
508 cap = interpretBCO(cap);
509 ret = cap->r.rRet;
510 break;
511
512 default:
513 barf("schedule: invalid what_next field");
514 }
515
516 cap->in_haskell = rtsFalse;
517
518 // The TSO might have moved, eg. if it re-entered the RTS and a GC
519 // happened. So find the new location:
520 t = cap->r.rCurrentTSO;
521
522 // We have run some Haskell code: there might be blackhole-blocked
523 // threads to wake up now.
524 // Lock-free test here should be ok, we're just setting a flag.
525 if ( blackhole_queue != END_TSO_QUEUE ) {
526 blackholes_need_checking = rtsTrue;
527 }
528
529 // And save the current errno in this thread.
530 // XXX: possibly bogus for SMP because this thread might already
531 // be running again, see code below.
532 t->saved_errno = errno;
533 #if mingw32_HOST_OS
534 // Similarly for Windows error code
535 t->saved_winerror = GetLastError();
536 #endif
537
538 postEvent (cap, EVENT_STOP_THREAD, t->id, ret);
539
540 #if defined(THREADED_RTS)
541 // If ret is ThreadBlocked, and this Task is bound to the TSO that
542 // blocked, we are in limbo - the TSO is now owned by whatever it
543 // is blocked on, and may in fact already have been woken up,
544 // perhaps even on a different Capability. It may be the case
545 // that task->cap != cap. We'd better yield this Capability
546 // immediately and return to normality.
547 if (ret == ThreadBlocked) {
548 debugTrace(DEBUG_sched,
549 "--<< thread %lu (%s) stopped: blocked",
550 (unsigned long)t->id, whatNext_strs[t->what_next]);
551 force_yield = rtsTrue;
552 goto yield;
553 }
554 #endif
555
556 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
557 ASSERT(t->cap == cap);
558
559 // ----------------------------------------------------------------------
560
561 // Costs for the scheduler are assigned to CCS_SYSTEM
562 stopHeapProfTimer();
563 #if defined(PROFILING)
564 CCCS = CCS_SYSTEM;
565 #endif
566
567 schedulePostRunThread(cap,t);
568
569 if (ret != StackOverflow) {
570 t = threadStackUnderflow(task,t);
571 }
572
573 ready_to_gc = rtsFalse;
574
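// Dispatch on the reason this thread returned to the scheduler.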
575 switch (ret) {
576 case HeapOverflow:
577 ready_to_gc = scheduleHandleHeapOverflow(cap,t);
578 break;
579
580 case StackOverflow:
581 scheduleHandleStackOverflow(cap,task,t);
582 break;
583
584 case ThreadYielding:
585 if (scheduleHandleYield(cap, t, prev_what_next)) {
586 // shortcut for switching between compiler/interpreter:
587 goto run_thread;
588 }
589 break;
590
591 case ThreadBlocked:
592 scheduleHandleThreadBlocked(t);
593 break;
594
595 case ThreadFinished:
596 if (scheduleHandleThreadFinished(cap, task, t)) return cap;
597 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
598 break;
599
600 default:
601 barf("schedule: invalid thread return code %d", (int)ret);
602 }
603
604 if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
605 cap = scheduleDoGC(cap,task,rtsFalse);
606 }
607 } /* end of while() */
608 }
609
610 /* ----------------------------------------------------------------------------
611 * Setting up the scheduler loop
612 * ------------------------------------------------------------------------- */
613
614 static void
615 schedulePreLoop(void)
616 {
617 // initialisation for scheduler - what cannot go into initScheduler()
618 }
619
620 /* -----------------------------------------------------------------------------
621 * scheduleFindWork()
622 *
623 * Search for work to do, and handle messages from elsewhere.
624 * -------------------------------------------------------------------------- */
625
626 static void
627 scheduleFindWork (Capability *cap)
628 {
629 scheduleStartSignalHandlers(cap);
630
631 // Only check the black holes here if we've nothing else to do.
632 // During normal execution, the black hole list only gets checked
633 // at GC time, to avoid repeatedly traversing this possibly long
634 // list each time around the scheduler.
635 if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
636
637 scheduleCheckWakeupThreads(cap);
638
639 scheduleCheckBlockedThreads(cap);
640
641 #if defined(THREADED_RTS)
642 if (emptyRunQueue(cap)) { scheduleActivateSpark(cap); }
643 #endif
644 }
645
646 #if defined(THREADED_RTS)
647 STATIC_INLINE rtsBool
648 shouldYieldCapability (Capability *cap, Task *task)
649 {
650 // we need to yield this capability to someone else if..
651 // - another thread is initiating a GC
652 // - another Task is returning from a foreign call
653 // - the thread at the head of the run queue cannot be run
654 // by this Task (it is bound to another Task, or it is unbound
655 // and this Task is bound).
656 return (waiting_for_gc ||
657 cap->returning_tasks_hd != NULL ||
658 (!emptyRunQueue(cap) && (task->tso == NULL
659 ? cap->run_queue_hd->bound != NULL
660 : cap->run_queue_hd->bound != task)));
661 }
662
663 // This is the single place where a Task goes to sleep. There are
664 // two reasons it might need to sleep:
665 // - there are no threads to run
666 // - we need to yield this Capability to someone else
667 // (see shouldYieldCapability())
668 //
669 // Careful: the scheduler loop is quite delicate. Make sure you run
670 // the tests in testsuite/concurrent (all ways) after modifying this,
671 // and also check the benchmarks in nofib/parallel for regressions.
672
673 static void
674 scheduleYield (Capability **pcap, Task *task, rtsBool force_yield)
675 {
676 Capability *cap = *pcap;
677
678 // if we have work, and we don't need to give up the Capability, continue.
679 //
680 // The force_yield flag is used when a bound thread blocks. This
681 // is a particularly tricky situation: the current Task does not
682 // own the TSO any more, since it is on some queue somewhere, and
683 // might be woken up or manipulated by another thread at any time.
684 // The TSO and Task might be migrated to another Capability.
685 // Certain invariants might be in doubt, such as task->bound->cap
686 // == cap. We have to yield the current Capability immediately,
687 // no messing around.
688 //
689 if (!force_yield &&
690 !shouldYieldCapability(cap,task) &&
691 (!emptyRunQueue(cap) ||
692 !emptyWakeupQueue(cap) ||
693 blackholes_need_checking ||
694 sched_state >= SCHED_INTERRUPTING))
695 return;
696
697 // otherwise yield (sleep), and keep yielding if necessary.
698 do {
699 yieldCapability(&cap,task);
700 }
701 while (shouldYieldCapability(cap,task));
702
703 // note there may still be no threads on the run queue at this
704 // point, the caller has to check.
705
706 *pcap = cap;
707 return;
708 }
709 #endif
710
711 /* -----------------------------------------------------------------------------
712 * schedulePushWork()
713 *
714 * Push work to other Capabilities if we have some.
715 * -------------------------------------------------------------------------- */
716
717 static void
718 schedulePushWork(Capability *cap USED_IF_THREADS,
719 Task *task USED_IF_THREADS)
720 {
721 /* following code not for PARALLEL_HASKELL. I kept the call general,
722 future GUM versions might use pushing in a distributed setup */
723 #if defined(THREADED_RTS)
724
725 Capability *free_caps[n_capabilities], *cap0;
726 nat i, n_free_caps;
727
728 // migration can be turned off with +RTS -qg
729 if (!RtsFlags.ParFlags.migrate) return;
730
731 // Check whether we have more threads on our run queue, or sparks
732 // in our pool, that we could hand to another Capability.
733 if (cap->run_queue_hd == END_TSO_QUEUE) {
734 if (sparkPoolSizeCap(cap) < 2) return;
735 } else {
736 if (cap->run_queue_hd->_link == END_TSO_QUEUE &&
737 sparkPoolSizeCap(cap) < 1) return;
738 }
739
740 // First grab as many free Capabilities as we can.
741 for (i=0, n_free_caps=0; i < n_capabilities; i++) {
742 cap0 = &capabilities[i];
743 if (cap != cap0 && tryGrabCapability(cap0,task)) {
744 if (!emptyRunQueue(cap0) || cap->returning_tasks_hd != NULL) {
745 // it already has some work, we just grabbed it at
746 // the wrong moment. Or maybe it's deadlocked!
747 releaseCapability(cap0);
748 } else {
749 free_caps[n_free_caps++] = cap0;
750 }
751 }
752 }
753
754 // we now have n_free_caps free capabilities stashed in
755 // free_caps[]. Share our run queue equally with them. This is
756 // probably the simplest thing we could do; improvements we might
757 // want to do include:
758 //
759 // - giving high priority to moving relatively new threads, on
760 // the grounds that they haven't had time to build up a
761 // working set in the cache on this CPU/Capability.
762 //
763 // - giving low priority to moving long-lived threads
764
765 if (n_free_caps > 0) {
766 StgTSO *prev, *t, *next;
767 rtsBool pushed_to_all;
768
769 debugTrace(DEBUG_sched,
770 "cap %d: %s and %d free capabilities, sharing...",
771 cap->no,
772 (!emptyRunQueue(cap) && cap->run_queue_hd->_link != END_TSO_QUEUE)?
773 "excess threads on run queue":"sparks to share (>=2)",
774 n_free_caps);
775
776 i = 0;
777 pushed_to_all = rtsFalse;
778
779 if (cap->run_queue_hd != END_TSO_QUEUE) {
780 prev = cap->run_queue_hd;
781 t = prev->_link;
782 prev->_link = END_TSO_QUEUE;
783 for (; t != END_TSO_QUEUE; t = next) {
784 next = t->_link;
785 t->_link = END_TSO_QUEUE;
786 if (t->what_next == ThreadRelocated
787 || t->bound == task // don't move my bound thread
788 || tsoLocked(t)) { // don't move a locked thread
789 setTSOLink(cap, prev, t);
790 prev = t;
791 } else if (i == n_free_caps) {
792 pushed_to_all = rtsTrue;
793 i = 0;
794 // keep one for us
795 setTSOLink(cap, prev, t);
796 prev = t;
797 } else {
798 debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
799 appendToRunQueue(free_caps[i],t);
800
801 postEvent (cap, EVENT_MIGRATE_THREAD, t->id, free_caps[i]->no);
802
803 if (t->bound) { t->bound->cap = free_caps[i]; }
804 t->cap = free_caps[i];
805 i++;
806 }
807 }
808 cap->run_queue_tl = prev;
809 }
810
811 #ifdef SPARK_PUSHING
812 /* JB I left this code in place, it would work but is not necessary */
813
814 // If there are some free capabilities that we didn't push any
815 // threads to, then try to push a spark to each one.
816 if (!pushed_to_all) {
817 StgClosure *spark;
818 // i is the next free capability to push to
819 for (; i < n_free_caps; i++) {
820 if (emptySparkPoolCap(free_caps[i])) {
821 spark = tryStealSpark(cap->sparks);
822 if (spark != NULL) {
823 debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
824
825 postEvent(free_caps[i], EVENT_STEAL_SPARK, t->id, cap->no);
826
827 newSpark(&(free_caps[i]->r), spark);
828 }
829 }
830 }
831 }
832 #endif /* SPARK_PUSHING */
833
834 // release the capabilities
835 for (i = 0; i < n_free_caps; i++) {
836 task->cap = free_caps[i];
837 releaseAndWakeupCapability(free_caps[i]);
838 }
839 }
840 task->cap = cap; // reset to point to our Capability.
841
842 #endif /* THREADED_RTS */
843
844 }
845
846 /* ----------------------------------------------------------------------------
847 * Start any pending signal handlers
848 * ------------------------------------------------------------------------- */
849
850 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
851 static void
852 scheduleStartSignalHandlers(Capability *cap)
853 {
854 if (RtsFlags.MiscFlags.install_signal_handlers && signals_pending()) {
855 // safe outside the lock
856 startSignalHandlers(cap);
857 }
858 }
859 #else
860 static void
861 scheduleStartSignalHandlers(Capability *cap STG_UNUSED)
862 {
863 }
864 #endif
865
866 /* ----------------------------------------------------------------------------
867 * Check for blocked threads that can be woken up.
868 * ------------------------------------------------------------------------- */
869
870 static void
871 scheduleCheckBlockedThreads(Capability *cap USED_IF_NOT_THREADS)
872 {
873 #if !defined(THREADED_RTS)
874 //
875 // Check whether any waiting threads need to be woken up. If the
876 // run queue is empty, and there are no other tasks running, we
877 // can wait indefinitely for something to happen.
878 //
879 if ( !emptyQueue(blocked_queue_hd) || !emptyQueue(sleeping_queue) )
880 {
881 awaitEvent( emptyRunQueue(cap) && !blackholes_need_checking );
882 }
883 #endif
884 }
885
886
887 /* ----------------------------------------------------------------------------
888 * Check for threads woken up by other Capabilities
889 * ------------------------------------------------------------------------- */
890
891 static void
892 scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS)
893 {
894 #if defined(THREADED_RTS)
895 // Any threads that were woken up by other Capabilities get
896 // appended to our run queue.
897 if (!emptyWakeupQueue(cap)) {
898 ACQUIRE_LOCK(&cap->lock);
899 if (emptyRunQueue(cap)) {
900 cap->run_queue_hd = cap->wakeup_queue_hd;
901 cap->run_queue_tl = cap->wakeup_queue_tl;
902 } else {
903 setTSOLink(cap, cap->run_queue_tl, cap->wakeup_queue_hd);
904 cap->run_queue_tl = cap->wakeup_queue_tl;
905 }
906 cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE;
907 RELEASE_LOCK(&cap->lock);
908 }
909 #endif
910 }
911
912 /* ----------------------------------------------------------------------------
913 * Check for threads blocked on BLACKHOLEs that can be woken up
914 * ------------------------------------------------------------------------- */
915 static void
916 scheduleCheckBlackHoles (Capability *cap)
917 {
918 if ( blackholes_need_checking ) // check without the lock first
919 {
920 ACQUIRE_LOCK(&sched_mutex);
921 if ( blackholes_need_checking ) {
922 blackholes_need_checking = rtsFalse;
923 // important that we reset the flag *before* checking the
924 // blackhole queue, otherwise we could get deadlock. This
925 // happens as follows: we wake up a thread that
926 // immediately runs on another Capability, blocks on a
927 // blackhole, and then we reset the blackholes_need_checking flag.
928 checkBlackHoles(cap);
929 }
930 RELEASE_LOCK(&sched_mutex);
931 }
932 }
933
934 /* ----------------------------------------------------------------------------
935 * Detect deadlock conditions and attempt to resolve them.
936 * ------------------------------------------------------------------------- */
937
938 static void
939 scheduleDetectDeadlock (Capability *cap, Task *task)
940 {
941 /*
942 * Detect deadlock: when we have no threads to run, there are no
943 * threads blocked, waiting for I/O, or sleeping, and all the
944 * other tasks are waiting for work, we must have a deadlock of
945 * some description.
946 */
947 if ( emptyThreadQueues(cap) )
948 {
949 #if defined(THREADED_RTS)
950 /*
951 * In the threaded RTS, we only check for deadlock if there
952 * has been no activity in a complete timeslice. This means
953 * we won't eagerly start a full GC just because we don't have
954 * any threads to run currently.
955 */
956 if (recent_activity != ACTIVITY_INACTIVE) return;
957 #endif
958
959 debugTrace(DEBUG_sched, "deadlocked, forcing major GC...");
960
961 // Garbage collection can release some new threads due to
962 // either (a) finalizers or (b) threads resurrected because
963 // they are unreachable and will therefore be sent an
964 // exception. Any threads thus released will be immediately
965 // runnable.
966 cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/);
967 // when force_major == rtsTrue, scheduleDoGC sets
968 // recent_activity to ACTIVITY_DONE_GC and turns off the timer
969 // signal.
970
971 if ( !emptyRunQueue(cap) ) return;
972
973 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
974 /* If we have user-installed signal handlers, then wait
975 * for signals to arrive rather than bombing out with a
976 * deadlock.
977 */
978 if ( RtsFlags.MiscFlags.install_signal_handlers && anyUserHandlers() ) {
979 debugTrace(DEBUG_sched,
980 "still deadlocked, waiting for signals...");
981
982 awaitUserSignals();
983
984 if (signals_pending()) {
985 startSignalHandlers(cap);
986 }
987
988 // either we have threads to run, or we were interrupted:
989 ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING);
990
991 return;
992 }
993 #endif
994
995 #if !defined(THREADED_RTS)
996 /* Probably a real deadlock. Send the current main thread the
997 * Deadlock exception.
998 */
999 if (task->tso) {
1000 switch (task->tso->why_blocked) {
1001 case BlockedOnSTM:
1002 case BlockedOnBlackHole:
1003 case BlockedOnException:
1004 case BlockedOnMVar:
1005 throwToSingleThreaded(cap, task->tso,
1006 (StgClosure *)nonTermination_closure);
1007 return;
1008 default:
1009 barf("deadlock: main thread blocked in a strange way");
1010 }
1011 }
1012 return;
1013 #endif
1014 }
1015 }
1016
1017
1018 /* ----------------------------------------------------------------------------
1019 * Send pending messages (PARALLEL_HASKELL only)
1020 * ------------------------------------------------------------------------- */
1021
1022 #if defined(PARALLEL_HASKELL)
1023 static void
1024 scheduleSendPendingMessages(void)
1025 {
1026
1027 # if defined(PAR) // global Mem.Mgmt., omit for now
1028 if (PendingFetches != END_BF_QUEUE) {
1029 processFetches();
1030 }
1031 # endif
1032
1033 if (RtsFlags.ParFlags.BufferTime) {
1034 // if we use message buffering, we must send away all message
1035 // packets which have become too old...
1036 sendOldBuffers();
1037 }
1038 }
1039 #endif
1040
1041 /* ----------------------------------------------------------------------------
1042 * Activate spark threads (PARALLEL_HASKELL and THREADED_RTS)
1043 * ------------------------------------------------------------------------- */
1044
1045 #if defined(THREADED_RTS)
1046 static void
1047 scheduleActivateSpark(Capability *cap)
1048 {
1049 if (anySparks())
1050 {
1051 createSparkThread(cap);
1052 debugTrace(DEBUG_sched, "creating a spark thread");
1053 }
1054 }
1055 #endif // THREADED_RTS
1056
1057 /* ----------------------------------------------------------------------------
1058 * After running a thread...
1059 * ------------------------------------------------------------------------- */
1060
1061 static void
1062 schedulePostRunThread (Capability *cap, StgTSO *t)
1063 {
1064 // We have to be able to catch transactions that are in an
1065 // infinite loop as a result of seeing an inconsistent view of
1066 // memory, e.g.
1067 //
1068 // atomically $ do
1069 // [a,b] <- mapM readTVar [ta,tb]
1070 // when (a == b) loop
1071 //
1072 // and a is never equal to b given a consistent view of memory.
1073 //
1074 if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
1075 if (!stmValidateNestOfTransactions (t -> trec)) {
1076 debugTrace(DEBUG_sched | DEBUG_stm,
1077 "trec %p found wasting its time", t);
1078
1079 // strip the stack back to the
1080 // ATOMICALLY_FRAME, aborting the (nested)
1081 // transaction, and saving the stack of any
1082 // partially-evaluated thunks on the heap.
1083 throwToSingleThreaded_(cap, t, NULL, rtsTrue);
1084
1085 ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
1086 }
1087 }
1088
1089 /* some statistics gathering in the parallel case */
1090 }
1091
1092 /* -----------------------------------------------------------------------------
1093 * Handle a thread that returned to the scheduler with ThreadHeapOverflow
1094 * -------------------------------------------------------------------------- */
1095
1096 static rtsBool
1097 scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
1098 {
1099 // did the task ask for a large block?
1100 if (cap->r.rHpAlloc > BLOCK_SIZE) {
1101 // if so, get one and push it on the front of the nursery.
1102 bdescr *bd;
1103 lnat blocks;
1104
1105 blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
1106
1107 debugTrace(DEBUG_sched,
1108 "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
1109 (long)t->id, whatNext_strs[t->what_next], blocks);
1110
1111 // don't do this if the nursery is (nearly) full, we'll GC first.
1112 if (cap->r.rCurrentNursery->link != NULL ||
1113 cap->r.rNursery->n_blocks == 1) { // paranoia to prevent infinite loop
1114 // if the nursery has only one block.
1115
1116 ACQUIRE_SM_LOCK
1117 bd = allocGroup( blocks );
1118 RELEASE_SM_LOCK
1119 cap->r.rNursery->n_blocks += blocks;
1120
1121 // link the new group into the list
1122 bd->link = cap->r.rCurrentNursery;
1123 bd->u.back = cap->r.rCurrentNursery->u.back;
1124 if (cap->r.rCurrentNursery->u.back != NULL) {
1125 cap->r.rCurrentNursery->u.back->link = bd;
1126 } else {
1127 cap->r.rNursery->blocks = bd;
1128 }
1129 cap->r.rCurrentNursery->u.back = bd;
1130
1131 // initialise it as a nursery block. We initialise the
1132 // step, gen_no, and flags field of *every* sub-block in
1133 // this large block, because this is easier than making
1134 // sure that we always find the block head of a large
1135 // block whenever we call Bdescr() (eg. evacuate() and
1136 // isAlive() in the GC would both have to do this, at
1137 // least).
1138 {
1139 bdescr *x;
1140 for (x = bd; x < bd + blocks; x++) {
1141 x->step = cap->r.rNursery;
1142 x->gen_no = 0;
1143 x->flags = 0;
1144 }
1145 }
1146
1147 // This assert can be a killer if the app is doing lots
1148 // of large block allocations.
1149 IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
1150
1151 // now update the nursery to point to the new block
1152 cap->r.rCurrentNursery = bd;
1153
1154 // we might be unlucky and have another thread get on the
1155 // run queue before us and steal the large block, but in that
1156 // case the thread will just end up requesting another large
1157 // block.
1158 pushOnRunQueue(cap,t);
1159 return rtsFalse; /* not actually GC'ing */
1160 }
1161 }
1162
1163 debugTrace(DEBUG_sched,
1164 "--<< thread %ld (%s) stopped: HeapOverflow",
1165 (long)t->id, whatNext_strs[t->what_next]);
1166
1167 if (cap->r.rHpLim == NULL || cap->context_switch) {
1168 // Sometimes we miss a context switch, e.g. when calling
1169 // primitives in a tight loop, MAYBE_GC() doesn't check the
1170 // context switch flag, and we end up waiting for a GC.
1171 // See #1984, and concurrent/should_run/1984
1172 cap->context_switch = 0;
1173 addToRunQueue(cap,t);
1174 } else {
1175 pushOnRunQueue(cap,t);
1176 }
1177 return rtsTrue;
1178 /* actual GC is done at the end of the while loop in schedule() */
1179 }
1180
1181 /* -----------------------------------------------------------------------------
1182 * Handle a thread that returned to the scheduler with ThreadStackOverflow
1183 * -------------------------------------------------------------------------- */
1184
1185 static void
1186 scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
1187 {
1188 debugTrace (DEBUG_sched,
1189 "--<< thread %ld (%s) stopped, StackOverflow",
1190 (long)t->id, whatNext_strs[t->what_next]);
1191
1192 /* just adjust the stack for this thread, then put it back
1193 * on the run queue.
1194 */
1195 {
1196 /* enlarge the stack */
1197 StgTSO *new_t = threadStackOverflow(cap, t);
1198
1199 /* The TSO attached to this Task may have moved, so update the
1200 * pointer to it.
1201 */
1202 if (task->tso == t) {
1203 task->tso = new_t;
1204 }
1205 pushOnRunQueue(cap,new_t);
1206 }
1207 }
1208
1209 /* -----------------------------------------------------------------------------
1210 * Handle a thread that returned to the scheduler with ThreadYielding
1211 * -------------------------------------------------------------------------- */
1212
1213 static rtsBool
1214 scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
1215 {
1216 // Reset the context switch flag. We don't do this just before
1217 // running the thread, because that would mean we would lose ticks
1218 // during GC, which can lead to unfair scheduling (a thread hogs
1219 // the CPU because the tick always arrives during GC). This way
1220 // penalises threads that do a lot of allocation, but that seems
1221 // better than the alternative.
1222 cap->context_switch = 0;
1223
1224 /* put the thread back on the run queue. Then, if we're ready to
1225 * GC, check whether this is the last task to stop. If so, wake
1226 * up the GC thread. getThread will block during a GC until the
1227 * GC is finished.
1228 */
1229 #ifdef DEBUG
1230 if (t->what_next != prev_what_next) {
1231 debugTrace(DEBUG_sched,
1232 "--<< thread %ld (%s) stopped to switch evaluators",
1233 (long)t->id, whatNext_strs[t->what_next]);
1234 } else {
1235 debugTrace(DEBUG_sched,
1236 "--<< thread %ld (%s) stopped, yielding",
1237 (long)t->id, whatNext_strs[t->what_next]);
1238 }
1239 #endif
1240
1241 IF_DEBUG(sanity,
1242 //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
1243 checkTSO(t));
1244 ASSERT(t->_link == END_TSO_QUEUE);
1245
1246 // Shortcut if we're just switching evaluators: don't bother
1247 // doing stack squeezing (which can be expensive), just run the
1248 // thread.
1249 if (t->what_next != prev_what_next) {
1250 return rtsTrue;
1251 }
1252
1253 addToRunQueue(cap,t);
1254
1255 return rtsFalse;
1256 }
1257
1258 /* -----------------------------------------------------------------------------
1259 * Handle a thread that returned to the scheduler with ThreadBlocked
1260 * -------------------------------------------------------------------------- */
1261
1262 static void
1263 scheduleHandleThreadBlocked( StgTSO *t
1264 #if !defined(DEBUG)
1265 STG_UNUSED
1266 #endif
1267 )
1268 {
1269
1270 // We don't need to do anything. The thread is blocked, and it
1271 // has tidied up its stack and placed itself on whatever queue
1272 // it needs to be on.
1273
1274 // ASSERT(t->why_blocked != NotBlocked);
1275 // Not true: for example,
1276 // - in THREADED_RTS, the thread may already have been woken
1277 // up by another Capability. This actually happens: try
1278 // conc023 +RTS -N2.
1279 // - the thread may have woken itself up already, because
1280 // threadPaused() might have raised a blocked throwTo
1281 // exception, see maybePerformBlockedException().
1282
1283 #ifdef DEBUG
1284 if (traceClass(DEBUG_sched)) {
1285 debugTraceBegin("--<< thread %lu (%s) stopped: ",
1286 (unsigned long)t->id, whatNext_strs[t->what_next]);
1287 printThreadBlockage(t);
1288 debugTraceEnd();
1289 }
1290 #endif
1291 }
1292
1293 /* -----------------------------------------------------------------------------
1294 * Handle a thread that returned to the scheduler with ThreadFinished
1295 * -------------------------------------------------------------------------- */
1296
1297 static rtsBool
1298 scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
1299 {
1300 /* Need to check whether this was a main thread, and if so,
1301 * return with the return value.
1302 *
1303 * We also end up here if the thread kills itself with an
1304 * uncaught exception, see Exception.cmm.
1305 */
1306 debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished",
1307 (unsigned long)t->id, whatNext_strs[t->what_next]);
1308
1309 // blocked exceptions can now complete, even if the thread was in
1310 // blocked mode (see #2910). This unconditionally calls
1311 // lockTSO(), which ensures that we don't miss any threads that
1312 // are engaged in throwTo() with this thread as a target.
1313 awakenBlockedExceptionQueue (cap, t);
1314
1315 //
1316 // Check whether the thread that just completed was a bound
1317 // thread, and if so return with the result.
1318 //
1319 // There is an assumption here that all thread completion goes
1320 // through this point; we need to make sure that if a thread
1321 // ends up in the ThreadKilled state, that it stays on the run
1322 // queue so it can be dealt with here.
1323 //
1324
1325 if (t->bound) {
1326
1327 if (t->bound != task) {
1328 #if !defined(THREADED_RTS)
1329 // Must be a bound thread that is not the topmost one. Leave
1330 // it on the run queue until the stack has unwound to the
1331 // point where we can deal with this. Leaving it on the run
1332 // queue also ensures that the garbage collector knows about
1333 // this thread and its return value (it gets dropped from the
1334 // step->threads list so there's no other way to find it).
1335 appendToRunQueue(cap,t);
1336 return rtsFalse;
1337 #else
1338 // this cannot happen in the threaded RTS, because a
1339 // bound thread can only be run by the appropriate Task.
1340 barf("finished bound thread that isn't mine");
1341 #endif
1342 }
1343
1344 ASSERT(task->tso == t);
1345
1346 if (t->what_next == ThreadComplete) {
1347 if (task->ret) {
1348 // NOTE: return val is tso->sp[1] (see StgStartup.hc)
1349 *(task->ret) = (StgClosure *)task->tso->sp[1];
1350 }
1351 task->stat = Success;
1352 } else {
1353 if (task->ret) {
1354 *(task->ret) = NULL;
1355 }
1356 if (sched_state >= SCHED_INTERRUPTING) {
1357 if (heap_overflow) {
1358 task->stat = HeapExhausted;
1359 } else {
1360 task->stat = Interrupted;
1361 }
1362 } else {
1363 task->stat = Killed;
1364 }
1365 }
1366 #ifdef DEBUG
1367 removeThreadLabel((StgWord)task->tso->id);
1368 #endif
1369 return rtsTrue; // tells schedule() to return
1370 }
1371
1372 return rtsFalse;
1373 }
1374
1375 /* -----------------------------------------------------------------------------
1376 * Perform a heap census
1377 * -------------------------------------------------------------------------- */
1378
1379 static rtsBool
1380 scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
1381 {
1382 // When we have +RTS -i0 and we're heap profiling, do a census at
1383 // every GC. This lets us get repeatable runs for debugging.
1384 if (performHeapProfile ||
1385 (RtsFlags.ProfFlags.profileInterval==0 &&
1386 RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
1387 return rtsTrue;
1388 } else {
1389 return rtsFalse;
1390 }
1391 }
1392
1393 /* -----------------------------------------------------------------------------
1394 * Perform a garbage collection if necessary
1395 * -------------------------------------------------------------------------- */
1396
1397 static Capability *
1398 scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
1399 {
1400 rtsBool heap_census;
1401 #ifdef THREADED_RTS
1402 /* extern static volatile StgWord waiting_for_gc;
1403 lives inside capability.c */
1404 rtsBool gc_type, prev_pending_gc;
1405 nat i;
1406 #endif
1407
1408 if (sched_state == SCHED_SHUTTING_DOWN) {
1409 // The final GC has already been done, and the system is
1410 // shutting down. We'll probably deadlock if we try to GC
1411 // now.
1412 return cap;
1413 }
1414
1415 #ifdef THREADED_RTS
1416 if (sched_state < SCHED_INTERRUPTING
1417 && RtsFlags.ParFlags.parGcEnabled
1418 && N >= RtsFlags.ParFlags.parGcGen
1419 && ! oldest_gen->steps[0].mark)
1420 {
1421 gc_type = PENDING_GC_PAR;
1422 } else {
1423 gc_type = PENDING_GC_SEQ;
1424 }
1425
1426 // In order to GC, there must be no threads running Haskell code.
1427 // Therefore, the GC thread needs to hold *all* the capabilities,
1428 // and release them after the GC has completed.
1429 //
1430 // This seems to be the simplest way: previous attempts involved
1431 // making all the threads with capabilities give up their
1432 // capabilities and sleep except for the *last* one, which
1433 // actually did the GC. But it's quite hard to arrange for all
1434 // the other tasks to sleep and stay asleep.
1435 //
1436
1437 /* Other capabilities are prevented from running yet more Haskell
1438 threads if waiting_for_gc is set. Tested inside
1439 yieldCapability() and releaseCapability() in Capability.c */
1440
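// Atomically claim the right to garbage-collect: if waiting_for_gc was
// zero we set it to gc_type and proceed; otherwise another Task is
// already organising a GC, so we just yield until it has finished.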
1441 prev_pending_gc = cas(&waiting_for_gc, 0, gc_type);
1442 if (prev_pending_gc) {
1443 do {
1444 debugTrace(DEBUG_sched, "someone else is trying to GC (%d)...",
1445 prev_pending_gc);
1446 ASSERT(cap);
1447 yieldCapability(&cap,task);
1448 } while (waiting_for_gc);
1449 return cap; // NOTE: task->cap might have changed here
1450 }
1451
1452 setContextSwitches();
1453
1454 // The final shutdown GC is always single-threaded, because it's
1455 // possible that some of the Capabilities have no worker threads.
1456
1457 if (gc_type == PENDING_GC_SEQ)
1458 {
1459 postEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
1460 // single-threaded GC: grab all the capabilities
1461 for (i=0; i < n_capabilities; i++) {
1462 debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilities (%d/%d)", i, n_capabilities);
1463 if (cap != &capabilities[i]) {
1464 Capability *pcap = &capabilities[i];
1465 // we better hope this task doesn't get migrated to
1466 // another Capability while we're waiting for this one.
1467 // It won't, because load balancing happens while we have
1468 // all the Capabilities, but even so it's a slightly
1469 // unsavoury invariant.
1470 task->cap = pcap;
1471 waitForReturnCapability(&pcap, task);
1472 if (pcap != &capabilities[i]) {
1473 barf("scheduleDoGC: got the wrong capability");
1474 }
1475 }
1476 }
1477 }
1478 else
1479 {
1480 // multi-threaded GC: make sure all the Capabilities donate one
1481 // GC thread each.
1482 postEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
1483 debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
1484
1485 waitForGcThreads(cap);
1486 }
1487 #endif
1488
1489 // so this happens periodically:
1490 if (cap) scheduleCheckBlackHoles(cap);
1491
1492 IF_DEBUG(scheduler, printAllThreads());
1493
1494 delete_threads_and_gc:
1495 /*
1496 * We now have all the capabilities; if we're in an interrupting
1497 * state, then we should take the opportunity to delete all the
1498 * threads in the system.
1499 */
1500 if (sched_state == SCHED_INTERRUPTING) {
1501 deleteAllThreads(cap);
1502 sched_state = SCHED_SHUTTING_DOWN;
1503 }
1504
1505 heap_census = scheduleNeedHeapProfile(rtsTrue);
1506
1507 #if defined(THREADED_RTS)
1508 postEvent(cap, EVENT_GC_START, 0, 0);
1509 debugTrace(DEBUG_sched, "doing GC");
1510 // reset waiting_for_gc *before* GC, so that when the GC threads
1511 // emerge they don't immediately re-enter the GC.
1512 waiting_for_gc = 0;
1513 GarbageCollect(force_major || heap_census, gc_type, cap);
1514 #else
1515 GarbageCollect(force_major || heap_census, 0, cap);
1516 #endif
1517 postEvent(cap, EVENT_GC_END, 0, 0);
1518
1519 if (recent_activity == ACTIVITY_INACTIVE && force_major)
1520 {
1521 // We are doing a GC because the system has been idle for a
1522 // timeslice and we need to check for deadlock. Record the
1523 // fact that we've done a GC and turn off the timer signal;
1524 // it will get re-enabled if we run any threads after the GC.
1525 recent_activity = ACTIVITY_DONE_GC;
1526 stopTimer();
1527 }
1528 else
1529 {
1530 // the GC might have taken long enough for the timer to set
1531 // recent_activity = ACTIVITY_INACTIVE, but we aren't
1532 // necessarily deadlocked:
1533 recent_activity = ACTIVITY_YES;
1534 }
1535
1536 #if defined(THREADED_RTS)
1537 if (gc_type == PENDING_GC_PAR)
1538 {
1539 releaseGCThreads(cap);
1540 }
1541 #endif
1542
1543 if (heap_census) {
1544 debugTrace(DEBUG_sched, "performing heap census");
1545 heapCensus();
1546 performHeapProfile = rtsFalse;
1547 }
1548
1549 if (heap_overflow && sched_state < SCHED_INTERRUPTING) {
1550 // GC set the heap_overflow flag, so we should proceed with
1551 // an orderly shutdown now. Ultimately we want the main
1552 // thread to return to its caller with HeapExhausted, at which
1553 // point the caller should call hs_exit(). The first step is
1554 // to delete all the threads.
1555 //
1556 // Another way to do this would be to raise an exception in
1557 // the main thread, which we really should do because it gives
1558 // the program a chance to clean up. But how do we find the
1559 // main thread? It should presumably be the same one that
1560 // gets ^C exceptions, but that's all done on the Haskell side
1561 // (GHC.TopHandler).
1562 sched_state = SCHED_INTERRUPTING;
1563 goto delete_threads_and_gc;
1564 }
1565
1566 #ifdef SPARKBALANCE
1567 /* JB
1568 Once we are all together... this would be the place to balance all
1569 spark pools. No concurrent stealing or adding of new sparks can
1570 occur. Should be defined in Sparks.c. */
1571 balanceSparkPoolsCaps(n_capabilities, capabilities);
1572 #endif
1573
1574 #if defined(THREADED_RTS)
1575 if (gc_type == PENDING_GC_SEQ) {
1576 // release our stash of capabilities.
1577 for (i = 0; i < n_capabilities; i++) {
1578 if (cap != &capabilities[i]) {
1579 task->cap = &capabilities[i];
1580 releaseCapability(&capabilities[i]);
1581 }
1582 }
1583 }
1584 if (cap) {
1585 task->cap = cap;
1586 } else {
1587 task->cap = NULL;
1588 }
1589 #endif
1590
1591 return cap;
1592 }
1593
1594 /* ---------------------------------------------------------------------------
1595 * Singleton fork(). Do not copy any running threads.
1596 * ------------------------------------------------------------------------- */
1597
1598 pid_t
1599 forkProcess(HsStablePtr *entry
1600 #ifndef FORKPROCESS_PRIMOP_SUPPORTED
1601 STG_UNUSED
1602 #endif
1603 )
1604 {
1605 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
1606 Task *task;
1607 pid_t pid;
1608 StgTSO* t,*next;
1609 Capability *cap;
1610 nat s;
1611
1612 #if defined(THREADED_RTS)
1613 if (RtsFlags.ParFlags.nNodes > 1) {
1614 errorBelch("forking not supported with +RTS -N<n> greater than 1");
1615 stg_exit(EXIT_FAILURE);
1616 }
1617 #endif
1618
1619 debugTrace(DEBUG_sched, "forking!");
1620
1621 // ToDo: for SMP, we should probably acquire *all* the capabilities
1622 cap = rts_lock();
1623
1624 // no funny business: hold locks while we fork, otherwise if some
1625 // other thread is holding a lock when the fork happens, the data
1626 // structure protected by the lock will forever be in an
1627 // inconsistent state in the child. See also #1391.
1628 ACQUIRE_LOCK(&sched_mutex);
1629 ACQUIRE_LOCK(&cap->lock);
1630 ACQUIRE_LOCK(&cap->running_task->lock);
1631
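// Do the fork while the locks are held; the parent releases them below,
// and the child re-initialises them (see #1391).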
1632 pid = fork();
1633
1634 if (pid) { // parent
1635
1636 RELEASE_LOCK(&sched_mutex);
1637 RELEASE_LOCK(&cap->lock);
1638 RELEASE_LOCK(&cap->running_task->lock);
1639
1640 // just return the pid
1641 rts_unlock(cap);
1642 return pid;
1643
1644 } else { // child
1645
1646 #if defined(THREADED_RTS)
1647 initMutex(&sched_mutex);
1648 initMutex(&cap->lock);
1649 initMutex(&cap->running_task->lock);
1650 #endif
1651
1652 // Now, all OS threads except the thread that forked are
1653 // stopped. We need to stop all Haskell threads, including
1654 // those involved in foreign calls. Also we need to delete
1655 // all Tasks, because they correspond to OS threads that are
1656 // now gone.
1657
1658 for (s = 0; s < total_steps; s++) {
1659 for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
1660 if (t->what_next == ThreadRelocated) {
1661 next = t->_link;
1662 } else {
1663 next = t->global_link;
1664 // don't allow threads to catch the ThreadKilled
1665 // exception, but we do want to raiseAsync() because these
1666 // threads may be evaluating thunks that we need later.
1667 deleteThread_(cap,t);
1668 }
1669 }
1670 }
1671
1672 // Empty the run queue. It seems tempting to let all the
1673 // killed threads stay on the run queue as zombies to be
1674 // cleaned up later, but some of them correspond to bound
1675 // threads for which the corresponding Task does not exist.
1676 cap->run_queue_hd = END_TSO_QUEUE;
1677 cap->run_queue_tl = END_TSO_QUEUE;
1678
1679 // Any suspended C-calling Tasks are no more, their OS threads
1680 // don't exist now:
1681 cap->suspended_ccalling_tasks = NULL;
1682
1683 // Empty the threads lists. Otherwise, the garbage
1684 // collector may attempt to resurrect some of these threads.
1685 for (s = 0; s < total_steps; s++) {
1686 all_steps[s].threads = END_TSO_QUEUE;
1687 }
1688
1689 // Wipe the task list, except the current Task.
1690 ACQUIRE_LOCK(&sched_mutex);
1691 for (task = all_tasks; task != NULL; task=task->all_link) {
1692 if (task != cap->running_task) {
1693 #if defined(THREADED_RTS)
1694 initMutex(&task->lock); // see #1391
1695 #endif
1696 discardTask(task);
1697 }
1698 }
1699 RELEASE_LOCK(&sched_mutex);
1700
1701 #if defined(THREADED_RTS)
1702 // Wipe our spare workers list, they no longer exist. New
1703 // workers will be created if necessary.
1704 cap->spare_workers = NULL;
1705 cap->returning_tasks_hd = NULL;
1706 cap->returning_tasks_tl = NULL;
1707 #endif
1708
1709 // On Unix, all timers are reset in the child, so we need to start
1710 // the timer again.
1711 initTimer();
1712 startTimer();
1713
1714 cap = rts_evalStableIO(cap, entry, NULL); // run the action
1715 rts_checkSchedStatus("forkProcess",cap);
1716
1717 rts_unlock(cap);
1718 hs_exit(); // clean up and exit
1719 stg_exit(EXIT_SUCCESS);
1720 }
1721 #else /* !FORKPROCESS_PRIMOP_SUPPORTED */
1722 barf("forkProcess#: primop not supported on this platform, sorry!\n");
1723 #endif
1724 }
1725
1726 /* ---------------------------------------------------------------------------
1727 * Delete all the threads in the system
1728 * ------------------------------------------------------------------------- */
1729
1730 static void
1731 deleteAllThreads ( Capability *cap )
1732 {
1733 // NOTE: only safe to call if we own all capabilities.
1734
1735 StgTSO* t, *next;
1736 nat s;
1737
1738 debugTrace(DEBUG_sched,"deleting all threads");
1739 for (s = 0; s < total_steps; s++) {
1740 for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
1741 if (t->what_next == ThreadRelocated) {
1742 next = t->_link;
1743 } else {
1744 next = t->global_link;
1745 deleteThread(cap,t);
1746 }
1747 }
1748 }
1749
1750 // The run queue now contains a bunch of ThreadKilled threads. We
1751 // must not throw these away: the main thread(s) will be in there
1752 // somewhere, and the main scheduler loop has to deal with it.
1753 // Also, the run queue is the only thing keeping these threads from
1754 // being GC'd, and we don't want the "main thread has been GC'd" panic.
1755
1756 #if !defined(THREADED_RTS)
1757 ASSERT(blocked_queue_hd == END_TSO_QUEUE);
1758 ASSERT(sleeping_queue == END_TSO_QUEUE);
1759 #endif
1760 }
1761
1762 /* -----------------------------------------------------------------------------
1763 Managing the suspended_ccalling_tasks list.
1764 Locks required: sched_mutex
1765 -------------------------------------------------------------------------- */
1766
1767 STATIC_INLINE void
1768 suspendTask (Capability *cap, Task *task)
1769 {
1770 ASSERT(task->next == NULL && task->prev == NULL);
1771 task->next = cap->suspended_ccalling_tasks;
1772 task->prev = NULL;
1773 if (cap->suspended_ccalling_tasks) {
1774 cap->suspended_ccalling_tasks->prev = task;
1775 }
1776 cap->suspended_ccalling_tasks = task;
1777 }
1778
1779 STATIC_INLINE void
1780 recoverSuspendedTask (Capability *cap, Task *task)
1781 {
1782 if (task->prev) {
1783 task->prev->next = task->next;
1784 } else {
1785 ASSERT(cap->suspended_ccalling_tasks == task);
1786 cap->suspended_ccalling_tasks = task->next;
1787 }
1788 if (task->next) {
1789 task->next->prev = task->prev;
1790 }
1791 task->next = task->prev = NULL;
1792 }
1793
1794 /* ---------------------------------------------------------------------------
1795 * Suspending & resuming Haskell threads.
1796 *
1797 * When making a "safe" call to C (aka _ccall_GC), the task gives back
1798 * its capability before calling the C function. This allows another
1799 * task to pick up the capability and carry on running Haskell
1800 * threads. It also means that if the C call blocks, it won't lock
1801 * the whole system.
1802 *
1803 * The Haskell thread making the C call is put to sleep for the
1804  * duration of the call, on the suspended_ccalling_tasks list. We
1805 * give out a token to the task, which it can use to resume the thread
1806 * on return from the C function.
1807 * ------------------------------------------------------------------------- */
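/* For illustration only (not part of the RTS proper): the stub that GHC
 * generates for a safe foreign call uses this pair of functions roughly
 * as follows, where 'foo' stands for some hypothetical foreign function:
 *
 *     token = suspendThread(BaseReg);  // give up the Capability
 *     result = foo(arg);               // the C call runs outside the RTS
 *     BaseReg = resumeThread(token);   // re-acquire a Capability to return
 *
 * The token handed out is in fact the Task pointer returned by
 * suspendThread() below.
 */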
1808
1809 void *
1810 suspendThread (StgRegTable *reg)
1811 {
1812 Capability *cap;
1813 int saved_errno;
1814 StgTSO *tso;
1815 Task *task;
1816 #if mingw32_HOST_OS
1817 StgWord32 saved_winerror;
1818 #endif
1819
1820 saved_errno = errno;
1821 #if mingw32_HOST_OS
1822 saved_winerror = GetLastError();
1823 #endif
1824
1825 /* assume that *reg is a pointer to the StgRegTable part of a Capability.
1826 */
1827 cap = regTableToCapability(reg);
1828
1829 task = cap->running_task;
1830 tso = cap->r.rCurrentTSO;
1831
1832 postEvent(cap, EVENT_STOP_THREAD, tso->id, THREAD_SUSPENDED_FOREIGN_CALL);
1833 debugTrace(DEBUG_sched,
1834 "thread %lu did a safe foreign call",
1835 (unsigned long)cap->r.rCurrentTSO->id);
1836
1837 // XXX this might not be necessary --SDM
1838 tso->what_next = ThreadRunGHC;
1839
1840 threadPaused(cap,tso);
1841
1842 if ((tso->flags & TSO_BLOCKEX) == 0) {
1843 tso->why_blocked = BlockedOnCCall;
1844 tso->flags |= TSO_BLOCKEX;
1845 tso->flags &= ~TSO_INTERRUPTIBLE;
1846 } else {
1847 tso->why_blocked = BlockedOnCCall_NoUnblockExc;
1848 }
1849
1850 // Hand back capability
1851 task->suspended_tso = tso;
1852
1853 ACQUIRE_LOCK(&cap->lock);
1854
1855 suspendTask(cap,task);
1856 cap->in_haskell = rtsFalse;
1857 releaseCapability_(cap,rtsFalse);
1858
1859 RELEASE_LOCK(&cap->lock);
1860
1861 #if defined(THREADED_RTS)
1862 /* Preparing to leave the RTS, so ensure there's a native thread/task
1863 waiting to take over.
1864 */
1865 debugTrace(DEBUG_sched, "thread %lu: leaving RTS", (unsigned long)tso->id);
1866 #endif
1867
1868 errno = saved_errno;
1869 #if mingw32_HOST_OS
1870 SetLastError(saved_winerror);
1871 #endif
1872 return task;
1873 }
1874
1875 StgRegTable *
1876 resumeThread (void *task_)
1877 {
1878 StgTSO *tso;
1879 Capability *cap;
1880 Task *task = task_;
1881 int saved_errno;
1882 #if mingw32_HOST_OS
1883 StgWord32 saved_winerror;
1884 #endif
1885
1886 saved_errno = errno;
1887 #if mingw32_HOST_OS
1888 saved_winerror = GetLastError();
1889 #endif
1890
1891 cap = task->cap;
1892 // Wait for permission to re-enter the RTS with the result.
1893 waitForReturnCapability(&cap,task);
1894 // we might be on a different capability now... but if so, our
1895 // entry on the suspended_ccalling_tasks list will also have been
1896 // migrated.
1897
1898 // Remove the thread from the suspended list
1899 recoverSuspendedTask(cap,task);
1900
1901 tso = task->suspended_tso;
1902 task->suspended_tso = NULL;
1903 tso->_link = END_TSO_QUEUE; // no write barrier reqd
1904
1905 postEvent(cap, EVENT_RUN_THREAD, tso->id, 0);
1906 debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
1907
1908 if (tso->why_blocked == BlockedOnCCall) {
1909 // avoid locking the TSO if we don't have to
1910 if (tso->blocked_exceptions != END_TSO_QUEUE) {
1911 awakenBlockedExceptionQueue(cap,tso);
1912 }
1913 tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
1914 }
1915
1916 /* Reset blocking status */
1917 tso->why_blocked = NotBlocked;
1918
1919 cap->r.rCurrentTSO = tso;
1920 cap->in_haskell = rtsTrue;
1921 errno = saved_errno;
1922 #if mingw32_HOST_OS
1923 SetLastError(saved_winerror);
1924 #endif
1925
1926 /* We might have GC'd, mark the TSO dirty again */
1927 dirty_TSO(cap,tso);
1928
1929 IF_DEBUG(sanity, checkTSO(tso));
1930
1931 return &cap->r;
1932 }
1933
1934 /* ---------------------------------------------------------------------------
1935 * scheduleThread()
1936 *
1937 * scheduleThread puts a thread on the end of the runnable queue.
1938 * This will usually be done immediately after a thread is created.
1939 * The caller of scheduleThread must create the thread using e.g.
1940 * createThread and push an appropriate closure
1941 * on this thread's stack before the scheduler is invoked.
1942 * ------------------------------------------------------------------------ */
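/* An illustrative use, following the pattern in rts_evalIO() (RtsAPI.c);
 * sketch only, where 'closure' stands for some IO action:
 *
 *     tso = createIOThread(cap, RtsFlags.GcFlags.initialStkSize, closure);
 *     scheduleThread(cap, tso);
 *
 * Bound (in-calling) threads use scheduleWaitThread() below instead,
 * which also runs the scheduler until the thread has completed.
 */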
1943
1944 void
1945 scheduleThread(Capability *cap, StgTSO *tso)
1946 {
1947 // The thread goes at the *end* of the run-queue, to avoid possible
1948 // starvation of any threads already on the queue.
1949 appendToRunQueue(cap,tso);
1950 }
1951
1952 void
1953 scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
1954 {
1955 #if defined(THREADED_RTS)
1956 tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
1957 // move this thread from now on.
1958 cpu %= RtsFlags.ParFlags.nNodes;
1959 if (cpu == cap->no) {
1960 appendToRunQueue(cap,tso);
1961 } else {
1962 postEvent (cap, EVENT_MIGRATE_THREAD, tso->id, capabilities[cpu].no);
1963 wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
1964 }
1965 #else
1966 appendToRunQueue(cap,tso);
1967 #endif
1968 }
1969
1970 Capability *
1971 scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
1972 {
1973 Task *task;
1974
1975 // We already created/initialised the Task
1976 task = cap->running_task;
1977
1978 // This TSO is now a bound thread; make the Task and TSO
1979 // point to each other.
1980 tso->bound = task;
1981 tso->cap = cap;
1982
1983 task->tso = tso;
1984 task->ret = ret;
1985 task->stat = NoStatus;
1986
1987 appendToRunQueue(cap,tso);
1988
1989 debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id);
1990
1991 cap = schedule(cap,task);
1992
1993 ASSERT(task->stat != NoStatus);
1994 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
1995
1996 debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id);
1997 return cap;
1998 }
1999
2000 /* ----------------------------------------------------------------------------
2001 * Starting Tasks
2002 * ------------------------------------------------------------------------- */
2003
2004 #if defined(THREADED_RTS)
2005 void OSThreadProcAttr
2006 workerStart(Task *task)
2007 {
2008 Capability *cap;
2009
2010 // See startWorkerTask().
2011 ACQUIRE_LOCK(&task->lock);
2012 cap = task->cap;
2013 RELEASE_LOCK(&task->lock);
2014
2015 if (RtsFlags.ParFlags.setAffinity) {
2016 setThreadAffinity(cap->no, n_capabilities);
2017 }
2018
2019 // set the thread-local pointer to the Task:
2020 taskEnter(task);
2021
2022 // schedule() runs without a lock.
2023 cap = schedule(cap,task);
2024
2025 // On exit from schedule(), we have a Capability, but possibly not
2026 // the same one we started with.
2027
2028 // During shutdown, the requirement is that after all the
2029 // Capabilities are shut down, all workers that are shutting down
2030 // have finished workerTaskStop(). This is why we hold on to
2031 // cap->lock until we've finished workerTaskStop() below.
2032 //
2033 // There may be workers still involved in foreign calls; those
2034 // will just block in waitForReturnCapability() because the
2035 // Capability has been shut down.
2036 //
2037 ACQUIRE_LOCK(&cap->lock);
2038 releaseCapability_(cap,rtsFalse);
2039 workerTaskStop(task);
2040 RELEASE_LOCK(&cap->lock);
2041 }
2042 #endif
2043
2044 /* ---------------------------------------------------------------------------
2045 * initScheduler()
2046 *
2047 * Initialise the scheduler. This resets all the queues - if the
2048 * queues contained any threads, they'll be garbage collected at the
2049 * next pass.
2050 *
2051 * ------------------------------------------------------------------------ */
2052
2053 void
2054 initScheduler(void)
2055 {
2056 #if !defined(THREADED_RTS)
2057 blocked_queue_hd = END_TSO_QUEUE;
2058 blocked_queue_tl = END_TSO_QUEUE;
2059 sleeping_queue = END_TSO_QUEUE;
2060 #endif
2061
2062 blackhole_queue = END_TSO_QUEUE;
2063
2064 sched_state = SCHED_RUNNING;
2065 recent_activity = ACTIVITY_YES;
2066
2067 #if defined(THREADED_RTS)
2068 /* Initialise the mutex and condition variables used by
2069 * the scheduler. */
2070 initMutex(&sched_mutex);
2071 #endif
2072
2073 ACQUIRE_LOCK(&sched_mutex);
2074
2075 /* A capability holds the state a native thread needs in
2076 * order to execute STG code. At least one capability is
2077 * floating around (only THREADED_RTS builds have more than one).
2078 */
2079 initCapabilities();
2080
2081 initTaskManager();
2082
2083 #if defined(THREADED_RTS)
2084 initSparkPools();
2085 #endif
2086
2087 #if defined(THREADED_RTS)
2088 /*
2089 * Eagerly start one worker to run each Capability, except for
2090 * Capability 0. The idea is that we're probably going to start a
2091 * bound thread on Capability 0 pretty soon, so we don't want a
2092 * worker task hogging it.
2093 */
2094 {
2095 nat i;
2096 Capability *cap;
2097 for (i = 1; i < n_capabilities; i++) {
2098 cap = &capabilities[i];
2099 ACQUIRE_LOCK(&cap->lock);
2100 startWorkerTask(cap, workerStart);
2101 RELEASE_LOCK(&cap->lock);
2102 }
2103 }
2104 #endif
2105
2106 RELEASE_LOCK(&sched_mutex);
2107 }
2108
2109 void
2110 exitScheduler(
2111 rtsBool wait_foreign
2112 #if !defined(THREADED_RTS)
2113 __attribute__((unused))
2114 #endif
2115 )
2116 /* see Capability.c, shutdownCapability() */
2117 {
2118 Task *task = NULL;
2119
2120 task = newBoundTask();
2121
2122 // If we haven't killed all the threads yet, do it now.
2123 if (sched_state < SCHED_SHUTTING_DOWN) {
2124 sched_state = SCHED_INTERRUPTING;
2125 waitForReturnCapability(&task->cap,task);
2126 scheduleDoGC(task->cap,task,rtsFalse);
2127 releaseCapability(task->cap);
2128 }
2129 sched_state = SCHED_SHUTTING_DOWN;
2130
2131 #if defined(THREADED_RTS)
2132 {
2133 nat i;
2134
2135 for (i = 0; i < n_capabilities; i++) {
2136 shutdownCapability(&capabilities[i], task, wait_foreign);
2137 }
2138 boundTaskExiting(task);
2139 }
2140 #endif
2141 }
2142
2143 void
2144 freeScheduler( void )
2145 {
2146 nat still_running;
2147
2148 ACQUIRE_LOCK(&sched_mutex);
2149 still_running = freeTaskManager();
2150 // We can only free the Capabilities if there are no Tasks still
2151 // running. We might have a Task about to return from a foreign
2152 // call into waitForReturnCapability(), for example (actually,
2153 // this should be the *only* thing that a still-running Task can
2154 // do at this point, and it will block waiting for the
2155 // Capability).
2156 if (still_running == 0) {
2157 freeCapabilities();
2158 if (n_capabilities != 1) {
2159 stgFree(capabilities);
2160 }
2161 }
2162 RELEASE_LOCK(&sched_mutex);
2163 #if defined(THREADED_RTS)
2164 closeMutex(&sched_mutex);
2165 #endif
2166 }
2167
2168 /* -----------------------------------------------------------------------------
2169 performGC
2170
2171 This is the interface to the garbage collector from Haskell land.
2172 We provide this so that external C code can allocate and garbage
2173 collect when called from Haskell via _ccall_GC.
2174 -------------------------------------------------------------------------- */
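/* Illustrative only: external C code that has initialised the RTS can
 * request a collection directly, e.g.
 *
 *     performGC();       // request a collection (not necessarily major)
 *     performMajorGC();  // force a full, major collection
 *
 * Both are thin wrappers around performGC_() below.
 */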
2175
2176 static void
2177 performGC_(rtsBool force_major)
2178 {
2179 Task *task;
2180
2181 // We must grab a new Task here, because the existing Task may be
2182 // associated with a particular Capability, and chained onto the
2183 // suspended_ccalling_tasks queue.
2184 task = newBoundTask();
2185
2186 waitForReturnCapability(&task->cap,task);
2187 scheduleDoGC(task->cap,task,force_major);
2188 releaseCapability(task->cap);
2189 boundTaskExiting(task);
2190 }
2191
2192 void
2193 performGC(void)
2194 {
2195 performGC_(rtsFalse);
2196 }
2197
2198 void
2199 performMajorGC(void)
2200 {
2201 performGC_(rtsTrue);
2202 }
2203
2204 /* -----------------------------------------------------------------------------
2205 Stack overflow
2206
2207 If the thread has reached its maximum stack size, then raise the
2208 StackOverflow exception in the offending thread. Otherwise
2209 relocate the TSO into a larger chunk of memory and adjust its stack
2210 size appropriately.
2211 -------------------------------------------------------------------------- */
2212
2213 static StgTSO *
2214 threadStackOverflow(Capability *cap, StgTSO *tso)
2215 {
2216 nat new_stack_size, stack_words;
2217 lnat new_tso_size;
2218 StgPtr new_sp;
2219 StgTSO *dest;
2220
2221 IF_DEBUG(sanity,checkTSO(tso));
2222
2223 // don't allow throwTo() to modify the blocked_exceptions queue
2224 // while we are moving the TSO:
2225 lockClosure((StgClosure *)tso);
2226
2227 if (tso->stack_size >= tso->max_stack_size && !(tso->flags & TSO_BLOCKEX)) {
2228 // NB. never raise a StackOverflow exception if the thread is
2229         // inside Control.Exception.block. It is impractical to protect
2230 // against stack overflow exceptions, since virtually anything
2231 // can raise one (even 'catch'), so this is the only sensible
2232 // thing to do here. See bug #767.
2233
2234 debugTrace(DEBUG_gc,
2235 "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
2236 (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size);
2237 IF_DEBUG(gc,
2238 /* If we're debugging, just print out the top of the stack */
2239 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2240 tso->sp+64)));
2241
2242 // Send this thread the StackOverflow exception
2243 unlockTSO(tso);
2244 throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
2245 return tso;
2246 }
2247
2248 /* Try to double the current stack size. If that takes us over the
2249 * maximum stack size for this thread, then use the maximum instead
2250 * (that is, unless we're already at or over the max size and we
2251 * can't raise the StackOverflow exception (see above), in which
2252 * case just double the size). Finally round up so the TSO ends up as
2253 * a whole number of blocks.
2254 */
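    /* Worked example (figures are illustrative): a thread with
     * stack_size = 1024 words and max_stack_size = 65536 words gets
     * new_stack_size = 2048; one with stack_size = 40000 words is capped
     * at 65536.  The block rounding below may then enlarge it slightly.
     */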
2255 if (tso->stack_size >= tso->max_stack_size) {
2256 new_stack_size = tso->stack_size * 2;
2257 } else {
2258 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
2259 }
2260 new_tso_size = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
2261 TSO_STRUCT_SIZE)/sizeof(W_);
2262 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
2263 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
2264
2265 debugTrace(DEBUG_sched,
2266 "increasing stack size from %ld words to %d.",
2267 (long)tso->stack_size, new_stack_size);
2268
2269 dest = (StgTSO *)allocateLocal(cap,new_tso_size);
2270 TICK_ALLOC_TSO(new_stack_size,0);
2271
2272 /* copy the TSO block and the old stack into the new area */
2273 memcpy(dest,tso,TSO_STRUCT_SIZE);
2274 stack_words = tso->stack + tso->stack_size - tso->sp;
2275 new_sp = (P_)dest + new_tso_size - stack_words;
2276 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
2277
2278 /* relocate the stack pointers... */
2279 dest->sp = new_sp;
2280 dest->stack_size = new_stack_size;
2281
2282 /* Mark the old TSO as relocated. We have to check for relocated
2283 * TSOs in the garbage collector and any primops that deal with TSOs.
2284 *
2285 * It's important to set the sp value to just beyond the end
2286 * of the stack, so we don't attempt to scavenge any part of the
2287 * dead TSO's stack.
2288 */
2289 tso->what_next = ThreadRelocated;
2290 setTSOLink(cap,tso,dest);
2291 tso->sp = (P_)&(tso->stack[tso->stack_size]);
2292 tso->why_blocked = NotBlocked;
2293
2294 unlockTSO(dest);
2295 unlockTSO(tso);
2296
2297 IF_DEBUG(sanity,checkTSO(dest));
2298 #if 0
2299 IF_DEBUG(scheduler,printTSO(dest));
2300 #endif
2301
2302 return dest;
2303 }
2304
2305 static StgTSO *
2306 threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
2307 {
2308 bdescr *bd, *new_bd;
2309 lnat free_w, tso_size_w;
2310 StgTSO *new_tso;
2311
2312 tso_size_w = tso_sizeW(tso);
2313
2314 if (tso_size_w < MBLOCK_SIZE_W ||
2315 // TSO is less than 2 mblocks (since the first mblock is
2316 // shorter than MBLOCK_SIZE_W)
2317 (tso_size_w - BLOCKS_PER_MBLOCK*BLOCK_SIZE_W) % MBLOCK_SIZE_W != 0 ||
2318 // or TSO is not a whole number of megablocks (ensuring
2319 // precondition of splitLargeBlock() below)
2320 (tso_size_w <= round_up_to_mblocks(RtsFlags.GcFlags.initialStkSize)) ||
2321 // or TSO is smaller than the minimum stack size (rounded up)
2322 (nat)(tso->stack + tso->stack_size - tso->sp) > tso->stack_size / 4)
2323 // or stack is using more than 1/4 of the available space
2324 {
2325 // then do nothing
2326 return tso;
2327 }
2328
2329 // don't allow throwTo() to modify the blocked_exceptions queue
2330 // while we are moving the TSO:
2331 lockClosure((StgClosure *)tso);
2332
2333 // this is the number of words we'll free
2334 free_w = round_to_mblocks(tso_size_w/2);
2335
2336 bd = Bdescr((StgPtr)tso);
2337 new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W);
2338 bd->free = bd->start + TSO_STRUCT_SIZEW;
2339
2340 new_tso = (StgTSO *)new_bd->start;
2341 memcpy(new_tso,tso,TSO_STRUCT_SIZE);
2342 new_tso->stack_size = new_bd->free - new_tso->stack;
2343
2344 debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
2345 (long)tso->id, tso_size_w, tso_sizeW(new_tso));
2346
2347 tso->what_next = ThreadRelocated;
2348 tso->_link = new_tso; // no write barrier reqd: same generation
2349
2350 // The TSO attached to this Task may have moved, so update the
2351 // pointer to it.
2352 if (task->tso == tso) {
2353 task->tso = new_tso;
2354 }
2355
2356 unlockTSO(new_tso);
2357 unlockTSO(tso);
2358
2359 IF_DEBUG(sanity,checkTSO(new_tso));
2360
2361 return new_tso;
2362 }
2363
2364 /* ---------------------------------------------------------------------------
2365 Interrupt execution
2366 - usually called inside a signal handler so it mustn't do anything fancy.
2367 ------------------------------------------------------------------------ */
2368
2369 void
2370 interruptStgRts(void)
2371 {
2372 sched_state = SCHED_INTERRUPTING;
2373 setContextSwitches();
2374 #if defined(THREADED_RTS)
2375 wakeUpRts();
2376 #endif
2377 }
2378
2379 /* -----------------------------------------------------------------------------
2380 Wake up the RTS
2381
2382 This function causes at least one OS thread to wake up and run the
2383 scheduler loop. It is invoked when the RTS might be deadlocked, or
2384 an external event has arrived that may need servicing (eg. a
2385 keyboard interrupt).
2386
2387 In the single-threaded RTS we don't do anything here; we only have
2388 one thread anyway, and the event that caused us to want to wake up
2389 will have interrupted any blocking system call in progress anyway.
2390 -------------------------------------------------------------------------- */
2391
2392 #if defined(THREADED_RTS)
2393 void wakeUpRts(void)
2394 {
2395 // This forces the IO Manager thread to wakeup, which will
2396 // in turn ensure that some OS thread wakes up and runs the
2397 // scheduler loop, which will cause a GC and deadlock check.
2398 ioManagerWakeup();
2399 }
2400 #endif
2401
2402 /* -----------------------------------------------------------------------------
2403 * checkBlackHoles()
2404 *
2405 * Check the blackhole_queue for threads that can be woken up. We do
2406 * this periodically: before every GC, and whenever the run queue is
2407 * empty.
2408 *
2409 * An elegant solution might be to just wake up all the blocked
2410 * threads with awakenBlockedQueue occasionally: they'll go back to
2411 * sleep again if the object is still a BLACKHOLE. Unfortunately this
2412 * doesn't give us a way to tell whether we've actually managed to
2413 * wake up any threads, so we would be busy-waiting.
2414 *
2415 * -------------------------------------------------------------------------- */
2416
2417 static rtsBool
2418 checkBlackHoles (Capability *cap)
2419 {
2420 StgTSO **prev, *t;
2421 rtsBool any_woke_up = rtsFalse;
2422 StgHalfWord type;
2423
2424 // blackhole_queue is global:
2425 ASSERT_LOCK_HELD(&sched_mutex);
2426
2427 debugTrace(DEBUG_sched, "checking threads blocked on black holes");
2428
2429 // ASSUMES: sched_mutex
2430 prev = &blackhole_queue;
2431 t = blackhole_queue;
2432 while (t != END_TSO_QUEUE) {
2433 if (t->what_next == ThreadRelocated) {
2434 t = t->_link;
2435 continue;
2436 }
2437 ASSERT(t->why_blocked == BlockedOnBlackHole);
2438 type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
2439 if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
2440 IF_DEBUG(sanity,checkTSO(t));
2441 t = unblockOne(cap, t);
2442 *prev = t;
2443 any_woke_up = rtsTrue;
2444 } else {
2445 prev = &t->_link;
2446 t = t->_link;
2447 }
2448 }
2449
2450 return any_woke_up;
2451 }
2452
2453 /* -----------------------------------------------------------------------------
2454 Deleting threads
2455
2456 This is used for interruption (^C) and forking, and corresponds to
2457 raising an exception but without letting the thread catch the
2458 exception.
2459 -------------------------------------------------------------------------- */
2460
2461 static void
2462 deleteThread (Capability *cap, StgTSO *tso)
2463 {
2464 // NOTE: must only be called on a TSO that we have exclusive
2465 // access to, because we will call throwToSingleThreaded() below.
2466 // The TSO must be on the run queue of the Capability we own, or
2467 // we must own all Capabilities.
2468
2469 if (tso->why_blocked != BlockedOnCCall &&
2470 tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
2471 throwToSingleThreaded(cap,tso,NULL);
2472 }
2473 }
2474
2475 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
2476 static void
2477 deleteThread_(Capability *cap, StgTSO *tso)
2478 { // for forkProcess only:
2479 // like deleteThread(), but we delete threads in foreign calls, too.
2480
2481 if (tso->why_blocked == BlockedOnCCall ||
2482 tso->why_blocked == BlockedOnCCall_NoUnblockExc) {
2483 unblockOne(cap,tso);
2484 tso->what_next = ThreadKilled;
2485 } else {
2486 deleteThread(cap,tso);
2487 }
2488 }
2489 #endif
2490
2491 /* -----------------------------------------------------------------------------
2492 raiseExceptionHelper
2493
2494    This function is called by the raise# primitive, just so that we can
2495 move some of the tricky bits of raising an exception from C-- into
2496    C. Who knows, it might be a useful, reusable thing here too.
2497 -------------------------------------------------------------------------- */
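/* Illustrative only: the C-- code for the raise# primop (stg_raisezh in
 * Exception.cmm) invokes this helper roughly as
 *
 *     (frame_type) = foreign "C" raiseExceptionHelper(BaseReg "ptr",
 *                                                     CurrentTSO "ptr",
 *                                                     exception "ptr") [];
 *
 * and then dispatches on the returned frame type (CATCH_FRAME,
 * ATOMICALLY_FRAME, STOP_FRAME, ...).
 */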
2498
2499 StgWord
2500 raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
2501 {
2502 Capability *cap = regTableToCapability(reg);
2503 StgThunk *raise_closure = NULL;
2504 StgPtr p, next;
2505 StgRetInfoTable *info;
2506 //
2507 // This closure represents the expression 'raise# E' where E
2508     // is the exception raised. It is used to overwrite all the
2509     // thunks which are currently under evaluation.
2510 //
2511
2512 // OLD COMMENT (we don't have MIN_UPD_SIZE now):
2513 // LDV profiling: stg_raise_info has THUNK as its closure
2514 // type. Since a THUNK takes at least MIN_UPD_SIZE words in its
2515     // payload, MIN_UPD_SIZE is more appropriate than 1. It seems that
2516 // 1 does not cause any problem unless profiling is performed.
2517 // However, when LDV profiling goes on, we need to linearly scan
2518 // small object pool, where raise_closure is stored, so we should
2519 // use MIN_UPD_SIZE.
2520 //
2521 // raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
2522 // sizeofW(StgClosure)+1);
2523 //
2524
2525 //
2526 // Walk up the stack, looking for the catch frame. On the way,
2527 // we update any closures pointed to from update frames with the
2528 // raise closure that we just built.
2529 //
2530 p = tso->sp;
2531 while(1) {
2532 info = get_ret_itbl((StgClosure *)p);
2533 next = p + stack_frame_sizeW((StgClosure *)p);
2534 switch (info->i.type) {
2535
2536 case UPDATE_FRAME:
2537 // Only create raise_closure if we need to.
2538 if (raise_closure == NULL) {
2539 raise_closure =
2540 (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
2541 SET_HDR(raise_closure, &stg_raise_info, CCCS);
2542 raise_closure->payload[0] = exception;
2543 }
2544 UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
2545 p = next;
2546 continue;
2547
2548 case ATOMICALLY_FRAME:
2549 debugTrace(DEBUG_stm, "found ATOMICALLY_FRAME at %p", p);
2550 tso->sp = p;
2551 return ATOMICALLY_FRAME;
2552
2553 case CATCH_FRAME:
2554 tso->sp = p;
2555 return CATCH_FRAME;
2556
2557 case CATCH_STM_FRAME:
2558 debugTrace(DEBUG_stm, "found CATCH_STM_FRAME at %p", p);
2559 tso->sp = p;
2560 return CATCH_STM_FRAME;
2561
2562 case STOP_FRAME:
2563 tso->sp = p;
2564 return STOP_FRAME;
2565
2566 case CATCH_RETRY_FRAME:
2567 default:
2568 p = next;
2569 continue;
2570 }
2571 }
2572 }
2573
2574
2575 /* -----------------------------------------------------------------------------
2576 findRetryFrameHelper
2577
2578 This function is called by the retry# primitive. It traverses the stack
2579 leaving tso->sp referring to the frame which should handle the retry.
2580
2581 This should either be a CATCH_RETRY_FRAME (if the retry# is within an orElse#)
2582    or should be an ATOMICALLY_FRAME (if the retry# reaches the top level).
2583
2584 We skip CATCH_STM_FRAMEs (aborting and rolling back the nested tx that they
2585 create) because retries are not considered to be exceptions, despite the
2586 similar implementation.
2587
2588 We should not expect to see CATCH_FRAME or STOP_FRAME because those should
2589 not be created within memory transactions.
2590 -------------------------------------------------------------------------- */
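/* Illustrative only: the retry# primop (stg_retryzh in PrimOps.cmm) calls
 * this helper roughly as
 *
 *     (frame_type) = foreign "C" findRetryFrameHelper(CurrentTSO "ptr") [];
 *
 * and then either restarts the transaction (ATOMICALLY_FRAME) or runs the
 * alternative branch of the orElse# (CATCH_RETRY_FRAME).
 */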
2591
2592 StgWord
2593 findRetryFrameHelper (StgTSO *tso)
2594 {
2595 StgPtr p, next;
2596 StgRetInfoTable *info;
2597
2598 p = tso -> sp;
2599 while (1) {
2600 info = get_ret_itbl((StgClosure *)p);
2601 next = p + stack_frame_sizeW((StgClosure *)p);
2602 switch (info->i.type) {
2603
2604 case ATOMICALLY_FRAME:
2605 debugTrace(DEBUG_stm,
2606 "found ATOMICALLY_FRAME at %p during retry", p);
2607 tso->sp = p;
2608 return ATOMICALLY_FRAME;
2609
2610 case CATCH_RETRY_FRAME:
2611 debugTrace(DEBUG_stm,
2612                    "found CATCH_RETRY_FRAME at %p during retry", p);
2613 tso->sp = p;
2614 return CATCH_RETRY_FRAME;
2615
2616 case CATCH_STM_FRAME: {
2617 StgTRecHeader *trec = tso -> trec;
2618 StgTRecHeader *outer = stmGetEnclosingTRec(trec);
2619 debugTrace(DEBUG_stm,
2620 "found CATCH_STM_FRAME at %p during retry", p);
2621 debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer);
2622 stmAbortTransaction(tso -> cap, trec);
2623 stmFreeAbortedTRec(tso -> cap, trec);
2624 tso -> trec = outer;
2625 p = next;
2626 continue;
2627 }
2628
2629
2630 default:
2631 ASSERT(info->i.type != CATCH_FRAME);
2632 ASSERT(info->i.type != STOP_FRAME);
2633 p = next;
2634 continue;
2635 }
2636 }
2637 }
2638
2639 /* -----------------------------------------------------------------------------
2640 resurrectThreads is called after garbage collection on the list of
2641 threads found to be garbage. Each of these threads will be woken
2642    up and sent an exception: BlockedIndefinitelyOnMVar if the thread was
2643    blocked on an MVar, NonTermination if it was blocked on a black hole,
2644    or BlockedIndefinitelyOnSTM if it was blocked on an STM transaction.
2645
2646 Locks: assumes we hold *all* the capabilities.
2647 -------------------------------------------------------------------------- */
2648
2649 void
2650 resurrectThreads (StgTSO *threads)
2651 {
2652 StgTSO *tso, *next;
2653 Capability *cap;
2654 step *step;
2655
2656 for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
2657 next = tso->global_link;
2658
2659 step = Bdescr((P_)tso)->step;
2660 tso->global_link = step->threads;
2661 step->threads = tso;
2662
2663 debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id);
2664
2665 // Wake up the thread on the Capability it was last on
2666 cap = tso->cap;
2667
2668 switch (tso->why_blocked) {
2669 case BlockedOnMVar:
2670 /* Called by GC - sched_mutex lock is currently held. */
2671 throwToSingleThreaded(cap, tso,
2672 (StgClosure *)blockedIndefinitelyOnMVar_closure);
2673 break;
2674 case BlockedOnBlackHole:
2675 throwToSingleThreaded(cap, tso,
2676 (StgClosure *)nonTermination_closure);
2677 break;
2678 case BlockedOnSTM:
2679 throwToSingleThreaded(cap, tso,
2680 (StgClosure *)blockedIndefinitelyOnSTM_closure);
2681 break;
2682 case NotBlocked:
2683 /* This might happen if the thread was blocked on a black hole
2684 * belonging to a thread that we've just woken up (raiseAsync
2685 * can wake up threads, remember...).
2686 */
2687 continue;
2688 case BlockedOnException:
2689 // throwTo should never block indefinitely: if the target
2690 // thread dies or completes, throwTo returns.
2691 barf("resurrectThreads: thread BlockedOnException");
2692 break;
2693 default:
2694 barf("resurrectThreads: thread blocked in a strange way");
2695 }
2696 }
2697 }
2698
2699 /* -----------------------------------------------------------------------------
2700 performPendingThrowTos is called after garbage collection, and
2701 passed a list of threads that were found to have pending throwTos
2702 (tso->blocked_exceptions was not empty), and were blocked.
2703 Normally this doesn't happen, because we would deliver the
2704 exception directly if the target thread is blocked, but there are
2705 small windows where it might occur on a multiprocessor (see
2706 throwTo()).
2707
2708 NB. we must be holding all the capabilities at this point, just
2709 like resurrectThreads().
2710 -------------------------------------------------------------------------- */
2711
2712 void
2713 performPendingThrowTos (StgTSO *threads)
2714 {
2715 StgTSO *tso, *next;
2716 Capability *cap;
2717     Task *task, *saved_task;
2718 step *step;
2719
2720 task = myTask();
2721 cap = task->cap;
2722
2723 for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
2724 next = tso->global_link;
2725
2726 step = Bdescr((P_)tso)->step;
2727 tso->global_link = step->threads;
2728 step->threads = tso;
2729
2730 debugTrace(DEBUG_sched, "performing blocked throwTo to thread %lu", (unsigned long)tso->id);
2731
2732 // We must pretend this Capability belongs to the current Task
2733 // for the time being, as invariants will be broken otherwise.
2734         // In fact the current Task has exclusive access to the system
2735 // at this point, so this is just bookkeeping:
2736 task->cap = tso->cap;
2737 saved_task = tso->cap->running_task;
2738 tso->cap->running_task = task;
2739 maybePerformBlockedException(tso->cap, tso);
2740 tso->cap->running_task = saved_task;
2741 }
2742
2743 // Restore our original Capability:
2744 task->cap = cap;
2745 }