Close the races between throwTo and thread completion
[ghc.git] / rts / RaiseAsync.c
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Asynchronous exceptions
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
#include "SMP.h"
#include "Schedule.h"
#include "LdvProfile.h"
#include "Updates.h"
#include "STM.h"
#include "Sanity.h"
#include "Profiling.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif

static void raiseAsync (Capability *cap,
                        StgTSO *tso,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgUpdateFrame *stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target);

static void performBlockedException (Capability *cap,
                                     StgTSO *source, StgTSO *target);

/* -----------------------------------------------------------------------------
   throwToSingleThreaded

   This version of throwTo is safe to use if and only if one of the
   following holds:

   - !THREADED_RTS

   - all the other threads in the system are stopped (eg. during GC).

   - we surely own the target TSO (eg. we just took it from the
     run queue of the current capability, or we are running it).

   It doesn't cater for blocking the source thread until the exception
   has been raised.
   -------------------------------------------------------------------------- */
54
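/* An illustrative sketch (not quoted from any real call site; the
 * variable names are invented): with every other thread stopped, e.g.
 * during GC, an exception can be raised in a thread directly:
 *
 *     // safe here: all other threads are stopped, so we own 'tso'
 *     throwToSingleThreaded(cap, tso, exception_closure);
 *
 * No blocking protocol is needed: the exception has been raised by the
 * time the call returns, or the call is a no-op if the thread is
 * already dead.
 */
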
void
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
{
    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
}

void
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
}

void
suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
}

/* -----------------------------------------------------------------------------
   throwTo

   This function may be used to throw an exception from one thread to
   another, during the course of normal execution.  This is a tricky
   task: the target thread might be running on another CPU, or it
   may be blocked and could be woken up at any point by another CPU.
   We have some delicate synchronisation to do.

   There is a completely safe fallback scheme: it is always possible
   to just block the source TSO on the target TSO's blocked_exceptions
   queue.  This queue is locked using lockTSO()/unlockTSO().  It is
   checked at regular intervals: before and after running a thread
   (schedule() and threadPaused() respectively), and just before GC
   (scheduleDoGC()).  Activating a thread on this queue should be done
   using maybePerformBlockedException(): this is done in the context
   of the target thread, so the exception can be raised eagerly.

   This fallback scheme works even if the target thread is complete or
   killed: scheduleDoGC() will discover the blocked thread before the
   target is GC'd.

   Blocking the source thread on the target thread's blocked_exceptions
   queue is also employed when the target thread is currently blocking
   exceptions (ie. inside Control.Exception.block).

   We could use the safe fallback scheme exclusively, but that
   wouldn't be ideal: most calls to throwTo would block immediately,
   possibly until the next GC, which might require the deadlock
   detection mechanism to kick in.  So we try to provide promptness
   wherever possible.

   We can promptly deliver the exception if the target thread is:

   - runnable, on the same Capability as the source thread (because
     we own the run queue and therefore the target thread).

   - blocked, and we can obtain exclusive access to it.  Obtaining
     exclusive access to the thread depends on how it is blocked.

   We must also be careful not to trip over threadStackOverflow(),
   which might be moving the TSO to enlarge its stack.
   lockTSO()/unlockTSO() are used here too.

   Returns:

   THROWTO_SUCCESS    exception was raised, ok to continue

   THROWTO_BLOCKED    exception was not raised; block the source
                      thread then call throwToReleaseTarget() when
                      the source thread is properly tidied away.

   -------------------------------------------------------------------------- */
143
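/* An illustrative sketch of the caller's side of the above protocol
 * (hedged: this is not quoted from a real call site, and 'exn' is an
 * invented name):
 *
 *     void *out;
 *     switch (throwTo(cap, source, target, exn, &out)) {
 *     case THROWTO_SUCCESS:
 *         break;              // exception raised; continue as normal
 *     case THROWTO_BLOCKED:
 *         // blockedThrowTo() has queued 'source' on the target's
 *         // blocked_exceptions queue; tidy 'source' away, then drop
 *         // the lock we still hold on the target:
 *         throwToReleaseTarget(out);
 *         break;
 *     }
 */
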
nat
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception, // the exception closure
         /*[out]*/ void **out USED_IF_THREADS)
{
    StgWord status;

    // follow ThreadRelocated links in the target first
    while (target->what_next == ThreadRelocated) {
        target = target->_link;
        // No, it might be a WHITEHOLE:
        // ASSERT(get_itbl(target)->type == TSO);
    }

    debugTrace(DEBUG_sched, "throwTo: from thread %lu to thread %lu",
               (unsigned long)source->id, (unsigned long)target->id);

#ifdef DEBUG
    if (traceClass(DEBUG_sched)) {
        debugTraceBegin("throwTo: target");
        printThreadStatus(target);
        debugTraceEnd();
    }
#endif

    goto check_target;
retry:
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
        /* if status==NotBlocked, and target->cap == cap, then
           we own this TSO and can raise the exception.

           How do we establish this condition?  Very carefully.

           Let
               P = (status == NotBlocked)
               Q = (tso->cap == cap)

           Now, if P & Q are true, then the TSO is locked and owned by
           this capability.  No other OS thread can steal it.

           If P==0 and Q==1: the TSO is blocked, but attached to this
           capability, and it can be stolen by another capability.

           If P==1 and Q==0: the TSO is runnable on another
           capability.  At any time, the TSO may change from runnable
           to blocked and vice versa, while it remains owned by
           another capability.

           Suppose we test like this:

              p = P
              q = Q
              if (p && q) ...

           this is defeated by another capability stealing a blocked
           TSO from us to wake it up (Schedule.c:unblockOne()).  The
           other thread is doing

              Q = 0
              P = 1

           assuming arbitrary reordering, we could see this
           interleaving:

              start: P==0 && Q==1
              P = 1
              p = P
              q = Q
              Q = 0
              if (p && q) ...

           so we need a memory barrier:

              p = P
              mb()
              q = Q
              if (p && q) ...

           this avoids the problematic case.  There are other cases
           to consider, but this is the tricky one.

           Note that we must be sure that unblockOne() does the
           writes in the correct order: Q before P.  The memory
           barrier ensures that if we have seen the write to P, we
           have also seen the write to Q.
        */
    {
        Capability *target_cap;

        write_barrier();
        target_cap = target->cap;
        if (target_cap == cap && (target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            // Otherwise, just block on the blocked_exceptions queue
            // of the target thread.  The queue will get looked at
            // soon enough: it is checked before and after running a
            // thread, and during GC.
            lockTSO(target);

            // Avoid race with threadStackOverflow, which may have
            // just moved this TSO.
            if (target->what_next == ThreadRelocated) {
                unlockTSO(target);
                target = target->_link;
                goto retry;
            }
            // check again for ThreadComplete and ThreadKilled.  This
            // cooperates with scheduleHandleThreadFinished to ensure
            // that we never miss any threads that are throwing an
            // exception to a thread in the process of terminating.
            if (target->what_next == ThreadComplete
                || target->what_next == ThreadKilled) {
                unlockTSO(target);
                return THROWTO_SUCCESS;
            }
            blockedThrowTo(cap,source,target);
            *out = target;
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMVar:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure.  In the threaded RTS it does.
        switch (get_itbl(mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockClosure((StgClosure *)mvar,info);
            goto retry;
        }
        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if (target->why_blocked != BlockedOnMVar
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            lockClosure((StgClosure *)target);
            blockedThrowTo(cap,source,target);
            unlockClosure((StgClosure *)mvar, info);
            *out = target;
            return THROWTO_BLOCKED; // caller releases TSO
        } else {
            removeThreadFromMVarQueue(cap, mvar, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        ACQUIRE_LOCK(&sched_mutex);
        // double checking the status after the memory barrier:
        if (target->why_blocked != BlockedOnBlackHole) {
            RELEASE_LOCK(&sched_mutex);
            goto retry;
        }

        if (target->flags & TSO_BLOCKEX) {
            lockTSO(target);
            blockedThrowTo(cap,source,target);
            RELEASE_LOCK(&sched_mutex);
            *out = target;
            return THROWTO_BLOCKED; // caller releases TSO
        } else {
            removeThreadFromQueue(cap, &blackhole_queue, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            RELEASE_LOCK(&sched_mutex);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnException:
    {
        StgTSO *target2;
        StgInfoTable *info;

        /*
          To obtain exclusive access to a BlockedOnException thread,
          we must call lockClosure() on the TSO on which it is blocked.
          Since the TSO might change underneath our feet, after we
          call lockClosure() we must check that

              (a) the closure we locked is actually a TSO,
              (b) the original thread is still BlockedOnException,
              (c) the original thread is still blocked on the TSO we locked,
          and (d) the target thread has not been relocated.

          We synchronise with threadStackOverflow() (which relocates
          threads) using lockClosure()/unlockClosure().
        */
        target2 = target->block_info.tso;

        info = lockClosure((StgClosure *)target2);
        if (info != &stg_TSO_info) {
            unlockClosure((StgClosure *)target2, info);
            goto retry;
        }
        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockTSO(target2);
            goto retry;
        }
        if (target2->what_next == ThreadRelocated) {
            target->block_info.tso = target2->_link;
            unlockTSO(target2);
            goto retry;
        }
        if (target->why_blocked != BlockedOnException
            || target->block_info.tso != target2) {
            unlockTSO(target2);
            goto retry;
        }

        /*
           Now we have exclusive rights to the target TSO...

           If it is blocking exceptions, add the source TSO to its
           blocked_exceptions queue.  Otherwise, raise the exception.
        */
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            lockTSO(target);
            blockedThrowTo(cap,source,target);
            unlockTSO(target2);
            *out = target;
            return THROWTO_BLOCKED;
        } else {
            removeThreadFromQueue(cap, &target2->blocked_exceptions, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockTSO(target2);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,source,target);
            *out = target;
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall:
    case BlockedOnCCall_NoUnblockExc:
        // I don't think it's possible to acquire ownership of a
        // BlockedOnCCall thread.  We just assume that the target
        // thread is blocking exceptions, and block on its
        // blocked_exceptions queue.
        lockTSO(target);
        if (target->why_blocked != BlockedOnCCall &&
            target->why_blocked != BlockedOnCCall_NoUnblockExc) {
            unlockTSO(target);
            goto retry;
        }
        blockedThrowTo(cap,source,target);
        *out = target;
        return THROWTO_BLOCKED;

#ifndef THREADED_RTS
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,source,target);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    default:
        barf("throwTo: unrecognised why_blocked value");
    }
    barf("throwTo");
}

// Block a TSO on another TSO's blocked_exceptions queue.
// Precondition: we hold an exclusive lock on the target TSO (this is
// complex to achieve as there's no single lock on a TSO; see
// throwTo()).
static void
blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target)
{
    debugTrace(DEBUG_sched, "throwTo: blocking on thread %lu",
               (unsigned long)target->id);
    setTSOLink(cap, source, target->blocked_exceptions);
    target->blocked_exceptions = source;
    dirty_TSO(cap,target); // we modified the blocked_exceptions queue

    source->block_info.tso = target;
    write_barrier(); // block_info.tso *must* be visible if BlockedOnException is.
    source->why_blocked = BlockedOnException;
}
498
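/* For illustration: the blocked_exceptions queue built above is an
 * intrusive list threaded through the source TSOs' _link fields (hence
 * setTSOLink()), with each new source pushed on the front:
 *
 *     target->blocked_exceptions --> srcN --> ... --> src1 --> END_TSO_QUEUE
 */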

#ifdef THREADED_RTS
void
throwToReleaseTarget (void *tso)
{
    unlockTSO((StgTSO *)tso);
}
#endif

/* -----------------------------------------------------------------------------
   Waking up threads blocked in throwTo

   There are two ways to do this: maybePerformBlockedException() will
   perform the throwTo() for the thread at the head of the queue
   immediately, and leave the other threads on the queue.
   maybePerformBlockedException() also checks the TSO_BLOCKEX flag
   before raising an exception.

   awakenBlockedExceptionQueue() will wake up all the threads in the
   queue, but not perform any throwTo() immediately.  This might be
   more appropriate when the target thread is the one actually running
   (see Exception.cmm).

   Returns: non-zero if an exception was raised, zero otherwise.
   -------------------------------------------------------------------------- */
524
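/* A hedged sketch of the intended call pattern (the real call sites
 * are schedule(), threadPaused() and scheduleDoGC(), per the comment
 * above throwTo(); this fragment is illustrative only):
 *
 *     // in the context of the target thread, e.g. just before running it:
 *     if (maybePerformBlockedException(cap, tso)) {
 *         // a pending exception is now on tso's stack; running the
 *         // thread as normal will execute the raise
 *     }
 */
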
int
maybePerformBlockedException (Capability *cap, StgTSO *tso)
{
    StgTSO *source;

    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        if (tso->blocked_exceptions != END_TSO_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);
            return 1;
        } else {
            return 0;
        }
    }

    if (tso->blocked_exceptions != END_TSO_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTrace(DEBUG_sched, "throwTo: thread %lu has blocked exceptions but is inside block",
                   (unsigned long)tso->id);
    }

    if (tso->blocked_exceptions != END_TSO_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // Lock the TSO, this gives us exclusive access to the queue
        lockTSO(tso);

        // Check the queue again; it might have changed before we
        // locked it.
        if (tso->blocked_exceptions == END_TSO_QUEUE) {
            unlockTSO(tso);
            return 0;
        }

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
        source = tso->blocked_exceptions;
        performBlockedException(cap, source, tso);
        tso->blocked_exceptions = unblockOne_(cap, source,
                                              rtsFalse/*no migrate*/);
        unlockTSO(tso);
        return 1;
    }
    return 0;
}

// awakenBlockedExceptionQueue(): Just wake up the whole queue of
// blocked exceptions and let them try again.

void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    lockTSO(tso);
    awakenBlockedQueue(cap, tso->blocked_exceptions);
    tso->blocked_exceptions = END_TSO_QUEUE;
    unlockTSO(tso);
}
581
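// performBlockedException: deliver the exception that 'source' is
// waiting to throw to 'target'.  The source blocked with a three-word
// stg_block_throwto frame on its own stack, laid out as the ASSERTs
// below check:
//
//     sp[0]   &stg_block_throwto_info
//     sp[1]   target TSO
//     sp[2]   exception closure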
static void
performBlockedException (Capability *cap, StgTSO *source, StgTSO *target)
{
    StgClosure *exception;

    ASSERT(source->why_blocked == BlockedOnException);
    ASSERT(source->block_info.tso->id == target->id);
    ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
    ASSERT(((StgTSO *)source->sp[1])->id == target->id);
    // check ids not pointers, because the thread might be relocated

    exception = (StgClosure *)source->sp[2];
    throwToSingleThreaded(cap, target, exception);
    source->sp += 3;
}

/* -----------------------------------------------------------------------------
   Remove a thread from blocking queues.

   This is for use when we raise an exception in another thread, which
   may be blocked.

   Precondition: we have exclusive access to the TSO, under the same
   conditions as for throwToSingleThreaded() (see above).
   -------------------------------------------------------------------------- */

static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here!  We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack.  We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
        removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
        goto done;

    case BlockedOnBlackHole:
        removeThreadFromQueue(cap, &blackhole_queue, tso);
        goto done;

    case BlockedOnException:
    {
        StgTSO *target = tso->block_info.tso;

        // NO: when called by threadPaused(), we probably have this
        // TSO already locked (WHITEHOLEd) because we just placed
        // ourselves on its queue.
        // ASSERT(get_itbl(target)->type == TSO);

        while (target->what_next == ThreadRelocated) {
            target = target->_link;
        }

        removeThreadFromQueue(cap, &target->blocked_exceptions, tso);
        goto done;
    }

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(cap, &sleeping_queue, tso);
        goto done;
#endif

    default:
        barf("removeFromQueues: %d", tso->why_blocked);
    }

done:
    tso->_link = END_TSO_QUEUE; // no write barrier reqd
    tso->why_blocked = NotBlocked;
    tso->block_info.closure = NULL;
    appendToRunQueue(cap,tso);

    // We might have just migrated this TSO to our Capability:
    if (tso->bound) {
        tso->bound->cap = cap;
    }
    tso->cap = cap;
}

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked.  The possible blockages are MVARs and BLACKHOLE_BQs.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary.  When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations?  We create an
 * AP_STACK for every UpdateFrame on the stack.  Entering one of these
 * AP_STACKs pushes everything from the corresponding update frame
 * upwards onto the stack.  (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_STACK object.)
 * Entering the next AP_STACK object pushes more onto the stack until we
 * reach the last AP_STACK object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack.  In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * ToDo: in THREADED_RTS mode, this function is only safe if either
 * (a) we hold all the Capabilities (eg. in GC, or if there is only
 * one Capability), or (b) we own the Capability that the TSO is
 * currently blocked on or on the run queue of.
 *
 * -------------------------------------------------------------------------- */
723
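/* An illustrative sketch of the transformation performed below
 * (assuming a single update frame between the top of stack and the
 * nearest CATCH_FRAME, and an exception to raise):
 *
 *       before:                          after:
 *
 *   | suspended work |
 *   +----------------+             +-------------------+
 *   |  UPDATE_FRAME  |    ===>     | raise 'exception' |
 *   +----------------+             +-------------------+
 *   |      ...       |             |    CATCH_FRAME    |
 *   +----------------+             +-------------------+
 *   |  CATCH_FRAME   |
 *   +----------------+
 *
 * The update frame's updatee is overwritten with an AP_STACK capturing
 * the suspended work, so the work can be resumed if the blackhole it
 * left behind is later demanded.
 */
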
static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    StgClosure *updatee;
    nat i;

    debugTrace(DEBUG_sched,
               "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:raisezh_fast.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
    {
        fprintCCS_stderr(tso->prof.CCCS);
    }
#endif

    // mark it dirty; we're about to change its stack.
    dirty_TSO(cap, tso);

    sp = tso->sp;

    // ASSUMES: the thread is not already complete or dead.  Upper
    // layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);

    if (stop_here != NULL) {
        updatee = stop_here->updatee;
    } else {
        updatee = NULL;
    }

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        sp++;
    } else {
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        // CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        // current closure applied to the chunk of stack up to (but not
        // including) the update frame.  This closure becomes the "current
        // closure".  Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        // top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocateLocal(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun  = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //    );

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update.  We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            } else {
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                UPD_IND(((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
            }

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; // no need to bump frame
        }

        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);
            return;
        }

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
#ifdef PROFILING
            StgCatchFrame *cf = (StgCatchFrame *)frame;
#endif
            StgThunk *raise;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            tso->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));
            return;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
                tso->sp = frame;
                tso->what_next = ThreadRunGHC;
                return;
            }
            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // If we reach one of these STM frames (including an
            // ATOMICALLY_FRAME, via the fall-through above) then we
            // abort the current transaction and propagate the
            // exception.  In this case (unlike ordinary exceptions)
            // we do not care whether the transaction is valid or not
            // because its possible validity cannot have caused the
            // exception and will not be visible after the abort.
        {
            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = stmGetEnclosingTRec(trec);
            debugTrace(DEBUG_stm,
                       "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);
            tso -> trec = outer;
            break;
        }

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);
}