Fix two more locking issues in throwTo()
[ghc.git] / rts / RaiseAsync.c
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Asynchronous exceptions
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
#include "SMP.h"
#include "Schedule.h"
#include "LdvProfile.h"
#include "Updates.h"
#include "STM.h"
#include "Sanity.h"
#include "Profiling.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif

static void raiseAsync (Capability *cap,
                        StgTSO *tso,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgUpdateFrame *stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target);

static void performBlockedException (Capability *cap,
                                     StgTSO *source, StgTSO *target);

/* -----------------------------------------------------------------------------
   throwToSingleThreaded

   This version of throwTo is safe to use if and only if one of the
   following holds:

   - !THREADED_RTS

   - all the other threads in the system are stopped (eg. during GC).

   - we surely own the target TSO (eg. we just took it from the
     run queue of the current capability, or we are running it).

   It doesn't cater for blocking the source thread until the exception
   has been raised.
   -------------------------------------------------------------------------- */

void
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
{
    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
}

void
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
}

void
suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
}

/* -----------------------------------------------------------------------------
   throwTo

   This function may be used to throw an exception from one thread to
   another, during the course of normal execution.  This is a tricky
   task: the target thread might be running on another CPU, or it
   may be blocked and could be woken up at any point by another CPU.
   We have some delicate synchronisation to do.

   There is a completely safe fallback scheme: it is always possible
   to just block the source TSO on the target TSO's blocked_exceptions
   queue.  This queue is locked using lockTSO()/unlockTSO().  It is
   checked at regular intervals: before and after running a thread
   (schedule() and threadPaused() respectively), and just before GC
   (scheduleDoGC()).  Activating a thread on this queue should be done
   using maybePerformBlockedException(): this is done in the context
   of the target thread, so the exception can be raised eagerly.

   This fallback scheme works even if the target thread is complete or
   killed: scheduleDoGC() will discover the blocked thread before the
   target is GC'd.

   Blocking the source thread on the target thread's blocked_exceptions
   queue is also employed when the target thread is currently blocking
   exceptions (ie. inside Control.Exception.block).

   We could use the safe fallback scheme exclusively, but that
   wouldn't be ideal: most calls to throwTo would block immediately,
   possibly until the next GC, which might require the deadlock
   detection mechanism to kick in.  So we try to provide promptness
   wherever possible.

   We can promptly deliver the exception if the target thread is:

     - runnable, on the same Capability as the source thread (because
       we own the run queue and therefore the target thread).

     - blocked, and we can obtain exclusive access to it.  Obtaining
       exclusive access to the thread depends on how it is blocked.

   We must also be careful not to trip over threadStackOverflow(),
   which might be moving the TSO to enlarge its stack.
   lockTSO()/unlockTSO() are used here too.

   Returns:

   THROWTO_SUCCESS    exception was raised, ok to continue

   THROWTO_BLOCKED    exception was not raised; block the source
                      thread then call throwToReleaseTarget() when
                      the source thread is properly tidied away.

   -------------------------------------------------------------------------- */
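
/* A caller-side sketch (illustrative only, not taken from the RTS
 * sources) of how the two return codes above are meant to be handled:
 *
 *     void *out;
 *     switch (throwTo(cap, source, target, exception, &out)) {
 *     case THROWTO_SUCCESS:
 *         break;          // exception raised; the source can continue
 *     case THROWTO_BLOCKED:
 *         // the source TSO is now on the target's blocked_exceptions
 *         // queue; tidy the source away, then release the target:
 *         throwToReleaseTarget(out);
 *         break;
 *     }
 */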

nat
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception, // the exception closure
         /*[out]*/ void **out USED_IF_THREADS)
{
    StgWord status;

    // follow ThreadRelocated links in the target first
    while (target->what_next == ThreadRelocated) {
        target = target->_link;
        // No, it might be a WHITEHOLE:
        // ASSERT(get_itbl(target)->type == TSO);
    }

    debugTrace(DEBUG_sched, "throwTo: from thread %lu to thread %lu",
               (unsigned long)source->id, (unsigned long)target->id);

#ifdef DEBUG
    if (traceClass(DEBUG_sched)) {
        debugTraceBegin("throwTo: target");
        printThreadStatus(target);
        debugTraceEnd();
    }
#endif

    goto check_target;
retry:
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
        /* if status==NotBlocked, and target->cap == cap, then
           we own this TSO and can raise the exception.

           How do we establish this condition?  Very carefully.

           Let
               P = (status == NotBlocked)
               Q = (tso->cap == cap)

           Now, if P & Q are true, then the TSO is locked and owned by
           this capability.  No other OS thread can steal it.

           If P==0 and Q==1: the TSO is blocked, but attached to this
           capability, and it can be stolen by another capability.

           If P==1 and Q==0: the TSO is runnable on another
           capability.  At any time, the TSO may change from runnable
           to blocked and vice versa, while it remains owned by
           another capability.

           Suppose we test like this:

              p = P
              q = Q
              if (p && q) ...

           this is defeated by another capability stealing a blocked
           TSO from us to wake it up (Schedule.c:unblockOne()).  The
           other thread is doing

              Q = 0
              P = 1

           assuming arbitrary reordering, we could see this
           interleaving:

              start: P==0 && Q==1
              P = 1
              p = P
              q = Q
              Q = 0
              if (p && q) ...

           so we need a memory barrier:

              p = P
              mb()
              q = Q
              if (p && q) ...

           this avoids the problematic case.  There are other cases
           to consider, but this is the tricky one.

           Note that we must be sure that unblockOne() does the
           writes in the correct order: Q before P.  The memory
           barrier ensures that if we have seen the write to P, we
           have also seen the write to Q.
        */
    {
        Capability *target_cap;

        write_barrier();
        target_cap = target->cap;
        if (target_cap == cap && (target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            // Otherwise, just block on the blocked_exceptions queue
            // of the target thread.  The queue will get looked at
            // soon enough: it is checked before and after running a
            // thread, and during GC.
            lockTSO(target);

            // Avoid race with threadStackOverflow, which may have
            // just moved this TSO.
            if (target->what_next == ThreadRelocated) {
                unlockTSO(target);
                target = target->_link;
                goto retry;
            }
            blockedThrowTo(cap,source,target);
            *out = target;
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMVar:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure.  In the threaded RTS it does.
        switch (get_itbl(mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockClosure((StgClosure *)mvar,info);
            goto retry;
        }
        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if (target->why_blocked != BlockedOnMVar
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            lockClosure((StgClosure *)target);
            blockedThrowTo(cap,source,target);
            unlockClosure((StgClosure *)mvar, info);
            *out = target;
            return THROWTO_BLOCKED; // caller releases TSO
        } else {
            removeThreadFromMVarQueue(cap, mvar, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        ACQUIRE_LOCK(&sched_mutex);
        // double checking the status after the memory barrier:
        if (target->why_blocked != BlockedOnBlackHole) {
            RELEASE_LOCK(&sched_mutex);
            goto retry;
        }

        if (target->flags & TSO_BLOCKEX) {
            lockTSO(target);
            blockedThrowTo(cap,source,target);
            RELEASE_LOCK(&sched_mutex);
            *out = target;
            return THROWTO_BLOCKED; // caller releases TSO
        } else {
            removeThreadFromQueue(cap, &blackhole_queue, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            RELEASE_LOCK(&sched_mutex);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnException:
    {
        StgTSO *target2;
        StgInfoTable *info;

        /*
          To obtain exclusive access to a BlockedOnException thread,
          we must call lockClosure() on the TSO on which it is blocked.
          Since the TSO might change underneath our feet, after we
          call lockClosure() we must check that

             (a) the closure we locked is actually a TSO,
             (b) the original thread is still BlockedOnException,
             (c) the original thread is still blocked on the TSO we locked,
         and (d) the target thread has not been relocated.

          We synchronise with threadStackOverflow() (which relocates
          threads) using lockClosure()/unlockClosure().
        */
        target2 = target->block_info.tso;

        info = lockClosure((StgClosure *)target2);
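        // (a) check that the closure we locked is actually a TSO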
        if (info != &stg_TSO_info) {
            unlockClosure((StgClosure *)target2, info);
            goto retry;
        }
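        // (d) follow any relocation of the target, or of the TSO we
        // just locked (target2)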
        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockTSO(target2);
            goto retry;
        }
        if (target2->what_next == ThreadRelocated) {
            target->block_info.tso = target2->_link;
            unlockTSO(target2);
            goto retry;
        }
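        // (b),(c) check that the target is still BlockedOnException,
        // and still blocked on the same TSO that we locked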
        if (target->why_blocked != BlockedOnException
            || target->block_info.tso != target2) {
            unlockTSO(target2);
            goto retry;
        }

        /*
          Now we have exclusive rights to the target TSO...

          If it is blocking exceptions, add the source TSO to its
          blocked_exceptions queue.  Otherwise, raise the exception.
        */
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            lockTSO(target);
            blockedThrowTo(cap,source,target);
            unlockTSO(target2);
            *out = target;
            return THROWTO_BLOCKED;
        } else {
            removeThreadFromQueue(cap, &target2->blocked_exceptions, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockTSO(target2);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,source,target);
            *out = target;
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall:
    case BlockedOnCCall_NoUnblockExc:
        // I don't think it's possible to acquire ownership of a
        // BlockedOnCCall thread.  We just assume that the target
        // thread is blocking exceptions, and block on its
        // blocked_exceptions queue.
        lockTSO(target);
        if (target->why_blocked != BlockedOnCCall &&
            target->why_blocked != BlockedOnCCall_NoUnblockExc) {
            unlockTSO(target);
            goto retry;
        }
        blockedThrowTo(cap,source,target);
        *out = target;
        return THROWTO_BLOCKED;

#ifndef THREADED_RTS
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,source,target);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    default:
        barf("throwTo: unrecognised why_blocked value");
    }
    barf("throwTo");
}

// Block a TSO on another TSO's blocked_exceptions queue.
// Precondition: we hold an exclusive lock on the target TSO (this is
// complex to achieve as there's no single lock on a TSO; see
// throwTo()).
static void
blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target)
{
    debugTrace(DEBUG_sched, "throwTo: blocking on thread %lu", (unsigned long)target->id);
    setTSOLink(cap, source, target->blocked_exceptions);
    target->blocked_exceptions = source;
    dirty_TSO(cap,target); // we modified the blocked_exceptions queue

    source->block_info.tso = target;
    write_barrier(); // throwTo_exception *must* be visible if BlockedOnException is.
    source->why_blocked = BlockedOnException;
}


#ifdef THREADED_RTS
void
throwToReleaseTarget (void *tso)
{
    unlockTSO((StgTSO *)tso);
}
#endif

/* -----------------------------------------------------------------------------
   Waking up threads blocked in throwTo

   There are two ways to do this: maybePerformBlockedException() will
   perform the throwTo() for the thread at the head of the queue
   immediately, and leave the other threads on the queue.
   maybePerformBlockedException() also checks the TSO_BLOCKEX flag
   before raising an exception.

   awakenBlockedExceptionQueue() will wake up all the threads in the
   queue, but not perform any throwTo() immediately.  This might be
   more appropriate when the target thread is the one actually running
   (see Exception.cmm).

   Returns: non-zero if an exception was raised, zero otherwise.
   -------------------------------------------------------------------------- */
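
/* A sketch of the scheduler-side hook (illustrative only; the real
 * call sites are schedule() and threadPaused(), as noted above):
 *
 *     // before running thread t on this capability:
 *     if (maybePerformBlockedException(cap, t)) {
 *         // an exception was raised in t: its stack now has the
 *         // raise closure on top, so just run t as normal
 *     }
 */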

int
maybePerformBlockedException (Capability *cap, StgTSO *tso)
{
    StgTSO *source;

    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        if (tso->blocked_exceptions != END_TSO_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);
            return 1;
        } else {
            return 0;
        }
    }

    if (tso->blocked_exceptions != END_TSO_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTrace(DEBUG_sched, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
    }

    if (tso->blocked_exceptions != END_TSO_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // Lock the TSO, this gives us exclusive access to the queue
        lockTSO(tso);

        // Check the queue again; it might have changed before we
        // locked it.
        if (tso->blocked_exceptions == END_TSO_QUEUE) {
            unlockTSO(tso);
            return 0;
        }

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
        source = tso->blocked_exceptions;
        performBlockedException(cap, source, tso);
        tso->blocked_exceptions = unblockOne_(cap, source,
                                              rtsFalse/*no migrate*/);
        unlockTSO(tso);
        return 1;
    }
    return 0;
}

void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    if (tso->blocked_exceptions != END_TSO_QUEUE) {
        lockTSO(tso);
        awakenBlockedQueue(cap, tso->blocked_exceptions);
        tso->blocked_exceptions = END_TSO_QUEUE;
        unlockTSO(tso);
    }
}

static void
performBlockedException (Capability *cap, StgTSO *source, StgTSO *target)
{
    StgClosure *exception;

    ASSERT(source->why_blocked == BlockedOnException);
    ASSERT(source->block_info.tso->id == target->id);
    ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
    ASSERT(((StgTSO *)source->sp[1])->id == target->id);
    // check ids not pointers, because the thread might be relocated

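    // The asserts above pin down the layout of the stg_block_throwto
    // frame on the source thread's stack:
    //    sp[0] = stg_block_throwto_info,  sp[1] = target TSO,
    //    sp[2] = the exception closure
    // so after raising the exception we pop all three words below.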
    exception = (StgClosure *)source->sp[2];
    throwToSingleThreaded(cap, target, exception);
    source->sp += 3;
}

/* -----------------------------------------------------------------------------
   Remove a thread from blocking queues.

   This is for use when we raise an exception in another thread, which
   may be blocked.

   Precondition: we have exclusive access to the TSO, via the same set
   of conditions as throwToSingleThreaded() (c.f.).
   -------------------------------------------------------------------------- */

static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here!  We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack.  We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
        removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
        goto done;

    case BlockedOnBlackHole:
        removeThreadFromQueue(cap, &blackhole_queue, tso);
        goto done;

    case BlockedOnException:
    {
        StgTSO *target = tso->block_info.tso;

        // NO: when called by threadPaused(), we probably have this
        // TSO already locked (WHITEHOLEd) because we just placed
        // ourselves on its queue.
        // ASSERT(get_itbl(target)->type == TSO);

        while (target->what_next == ThreadRelocated) {
            target = target->_link;
        }

        removeThreadFromQueue(cap, &target->blocked_exceptions, tso);
        goto done;
    }

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(cap, &sleeping_queue, tso);
        goto done;
#endif

    default:
        barf("removeFromQueues: %d", tso->why_blocked);
    }

done:
    tso->_link = END_TSO_QUEUE; // no write barrier reqd
    tso->why_blocked = NotBlocked;
    tso->block_info.closure = NULL;
    appendToRunQueue(cap,tso);

    // We might have just migrated this TSO to our Capability:
    if (tso->bound) {
        tso->bound->cap = cap;
    }
    tso->cap = cap;
}

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked.  The possible blockages are MVARs and BLACKHOLE_BQs.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary.  When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations?  We create an
 * AP_STACK for every UpdateFrame on the stack.  Entering one of these
 * AP_STACKs pushes everything from the corresponding update frame
 * upwards onto the stack.  (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_STACK object.
 * Entering the next AP_STACK object pushes more onto the stack until we
 * reach the last AP_STACK object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.)
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack.  In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * ToDo: in THREADED_RTS mode, this function is only safe if either
 * (a) we hold all the Capabilities (eg. in GC, or if there is only
 * one Capability), or (b) we own the Capability that the TSO is
 * currently blocked on or on the run queue of.
 *
 * -------------------------------------------------------------------------- */
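
/* An illustrative picture (not from the original source) of one step of
 * the stack-stripping loop below, for an UPDATE_FRAME with updatee u:
 *
 *     sp    -> | fun        |                 sp    -> | ap |
 *              | x_1 .. x_n |     ===>        frame -> (next frame up)
 *     frame -> | update u   |
 *
 * where ap = AP_STACK { size = n, fun = fun, payload = x_1..x_n }, and
 * u is updated with an indirection to ap (unless u is stop_here's
 * updatee, in which case the blackhole is kept so the loop can be
 * detected and reported, #2783).
 */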

static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    StgClosure *updatee;
    nat i;

    debugTrace(DEBUG_sched,
               "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:raisezh_fast.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
    {
        fprintCCS_stderr(tso->prof.CCCS);
    }
#endif

    // mark it dirty; we're about to change its stack.
    dirty_TSO(cap, tso);

    sp = tso->sp;

    // ASSUMES: the thread is not already complete or dead.  Upper
    // layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);

    if (stop_here != NULL) {
        updatee = stop_here->updatee;
    } else {
        updatee = NULL;
    }

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        sp++;
    } else {
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //    CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame.  This closure becomes the "current
        //    closure".  Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocateLocal(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun  = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //         );

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update.  We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            } else {
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                UPD_IND(((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
            }

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; // no need to bump frame
        }

        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);
            return;
        }

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
#ifdef PROFILING
            StgCatchFrame *cf = (StgCatchFrame *)frame;
#endif
            StgThunk *raise;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            tso->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));
            return;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
                tso->sp = frame;
                tso->what_next = ThreadRunGHC;
                return;
            }
            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // If we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception.  In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.
        {
            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = stmGetEnclosingTRec(trec);
            debugTrace(DEBUG_stm,
                       "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);
            tso -> trec = outer;
            break;
        }

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);
}