/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Asynchronous exceptions
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "sm/Storage.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
#include "Schedule.h"
#include "Updates.h"
#include "STM.h"
#include "Sanity.h"
#include "Profiling.h"
#include "eventlog/EventLog.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif

static void raiseAsync (Capability *cap,
                        StgTSO *tso,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgUpdateFrame *stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target);

static void performBlockedException (Capability *cap,
                                     StgTSO *source, StgTSO *target);

/* -----------------------------------------------------------------------------
   throwToSingleThreaded

   This version of throwTo is safe to use if and only if one of the
   following holds:

   - !THREADED_RTS

   - all the other threads in the system are stopped (eg. during GC).

   - we surely own the target TSO (eg. we just took it from the
     run queue of the current capability, or we are running it).

   It doesn't cater for blocking the source thread until the exception
   has been raised.
   -------------------------------------------------------------------------- */
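
/* For example (an illustrative sketch, not code from this file): the
 * scheduler is believed to kill a thread outright by passing a NULL
 * exception, which makes raiseAsync() strip the whole stack without
 * running any handler (cf. deleteThread() in Schedule.c):
 *
 *     throwToSingleThreaded(cap, tso, NULL);
 */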

void
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
{
    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
}

void
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
}

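/* suspendComputation(): like throwToSingleThreaded_(), but instead of
 * raising an exception it strips the stack back to the given update
 * frame (stop_here), letting raiseAsync() freeze the stripped frames
 * into AP_STACK closures so the computation can be resumed later.
 * (We believe this is used when a thread must abandon a computation,
 * e.g. on discovering duplicate evaluation; see ThreadPaused.c.)
 */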
void
suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
}

/* -----------------------------------------------------------------------------
   throwTo

   This function may be used to throw an exception from one thread to
   another, during the course of normal execution.  This is a tricky
   task: the target thread might be running on another CPU, or it
   may be blocked and could be woken up at any point by another CPU.
   We have some delicate synchronisation to do.

   There is a completely safe fallback scheme: it is always possible
   to just block the source TSO on the target TSO's blocked_exceptions
   queue.  This queue is locked using lockTSO()/unlockTSO().  It is
   checked at regular intervals: before and after running a thread
   (schedule() and threadPaused() respectively), and just before GC
   (scheduleDoGC()).  Activating a thread on this queue should be done
   using maybePerformBlockedException(): this is done in the context
   of the target thread, so the exception can be raised eagerly.

   This fallback scheme works even if the target thread is complete or
   killed: scheduleDoGC() will discover the blocked thread before the
   target is GC'd.

   Blocking the source thread on the target thread's blocked_exceptions
   queue is also employed when the target thread is currently blocking
   exceptions (ie. inside Control.Exception.block).

   We could use the safe fallback scheme exclusively, but that
   wouldn't be ideal: most calls to throwTo would block immediately,
   possibly until the next GC, which might require the deadlock
   detection mechanism to kick in.  So we try to provide promptness
   wherever possible.

   We can promptly deliver the exception if the target thread is:

   - runnable, on the same Capability as the source thread (because
     we own the run queue and therefore the target thread).

   - blocked, and we can obtain exclusive access to it.  Obtaining
     exclusive access to the thread depends on how it is blocked.

   We must also be careful not to trip over threadStackOverflow(),
   which might be moving the TSO to enlarge its stack.
   lockTSO()/unlockTSO() are used here too.

   Returns:

   THROWTO_SUCCESS    exception was raised, ok to continue

   THROWTO_BLOCKED    exception was not raised; block the source
                      thread then call throwToReleaseTarget() when
                      the source thread is properly tidied away.

   -------------------------------------------------------------------------- */
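
/* A hypothetical caller (a sketch only; the real call sites live in
 * the scheduler and in Exception.cmm) would follow the protocol
 * described above like this:
 *
 *     void *tso_locked = NULL;   // hypothetical local
 *     switch (throwTo(cap, source, target, exception, &tso_locked)) {
 *     case THROWTO_SUCCESS:
 *         break;  // exception was raised; carry on
 *     case THROWTO_BLOCKED:
 *         // source is now on target's blocked_exceptions queue;
 *         // tidy the source thread away, then release the lock:
 *         throwToReleaseTarget(tso_locked);
 *         break;
 *     }
 */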

nat
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception, // the exception closure
         /*[out]*/ void **out USED_IF_THREADS)
{
    StgWord status;

    // follow ThreadRelocated links in the target first
    while (target->what_next == ThreadRelocated) {
        target = target->_link;
        // No, it might be a WHITEHOLE:
        // ASSERT(get_itbl(target)->type == TSO);
    }

    debugTrace(DEBUG_sched, "throwTo: from thread %lu to thread %lu",
               (unsigned long)source->id, (unsigned long)target->id);

#ifdef DEBUG
    if (traceClass(DEBUG_sched)) {
        debugTraceBegin("throwTo: target");
        printThreadStatus(target);
        debugTraceEnd();
    }
#endif

    goto check_target;
retry:
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
        /* if status==NotBlocked, and target->cap == cap, then
           we own this TSO and can raise the exception.

           How do we establish this condition?  Very carefully.

           Let
             P = (status == NotBlocked)
             Q = (tso->cap == cap)

           Now, if P & Q are true, then the TSO is locked and owned by
           this capability.  No other OS thread can steal it.

           If P==0 and Q==1: the TSO is blocked, but attached to this
           capability, and it can be stolen by another capability.

           If P==1 and Q==0: the TSO is runnable on another
           capability.  At any time, the TSO may change from runnable
           to blocked and vice versa, while it remains owned by
           another capability.

           Suppose we test like this:

              p = P
              q = Q
              if (p && q) ...

           this is defeated by another capability stealing a blocked
           TSO from us to wake it up (Schedule.c:unblockOne()).  The
           other thread is doing

              Q = 0
              P = 1

           assuming arbitrary reordering, we could see this
           interleaving:

              start: P==0 && Q==1
              P = 1
              p = P
              q = Q
              Q = 0
              if (p && q) ...

           so we need a memory barrier:

              p = P
              mb()
              q = Q
              if (p && q) ...

           this avoids the problematic case.  There are other cases
           to consider, but this is the tricky one.

           Note that we must be sure that unblockOne() does the
           writes in the correct order: Q before P.  The memory
           barrier ensures that if we have seen the write to P, we
           have also seen the write to Q.
        */
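        /* In the code below, p corresponds to the read of
           target->why_blocked into 'status' above, and q to the read
           of target->cap after the barrier. */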
    {
        Capability *target_cap;

        write_barrier();
        target_cap = target->cap;
        if (target_cap == cap && (target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            // Otherwise, just block on the blocked_exceptions queue
            // of the target thread.  The queue will get looked at
            // soon enough: it is checked before and after running a
            // thread, and during GC.
            lockTSO(target);

            // Avoid race with threadStackOverflow, which may have
            // just moved this TSO.
            if (target->what_next == ThreadRelocated) {
                unlockTSO(target);
                target = target->_link;
                goto retry;
            }
            // check again for ThreadComplete and ThreadKilled.  This
            // cooperates with scheduleHandleThreadFinished to ensure
            // that we never miss any threads that are throwing an
            // exception to a thread in the process of terminating.
            if (target->what_next == ThreadComplete
                || target->what_next == ThreadKilled) {
                unlockTSO(target);
                return THROWTO_SUCCESS;
            }
            blockedThrowTo(cap,source,target);
            *out = target;
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMVar:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure.  In the threaded RTS it does.
        switch (get_itbl(mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockClosure((StgClosure *)mvar,info);
            goto retry;
        }
        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if (target->why_blocked != BlockedOnMVar
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            lockClosure((StgClosure *)target);
            blockedThrowTo(cap,source,target);
            unlockClosure((StgClosure *)mvar, info);
            *out = target;
            return THROWTO_BLOCKED; // caller releases TSO
        } else {
            removeThreadFromMVarQueue(cap, mvar, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        ACQUIRE_LOCK(&sched_mutex);
        // double checking the status after the memory barrier:
        if (target->why_blocked != BlockedOnBlackHole) {
            RELEASE_LOCK(&sched_mutex);
            goto retry;
        }

        if (target->flags & TSO_BLOCKEX) {
            lockTSO(target);
            blockedThrowTo(cap,source,target);
            RELEASE_LOCK(&sched_mutex);
            *out = target;
            return THROWTO_BLOCKED; // caller releases TSO
        } else {
            removeThreadFromQueue(cap, &blackhole_queue, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            RELEASE_LOCK(&sched_mutex);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnException:
    {
        StgTSO *target2;
        StgInfoTable *info;

        /*
          To obtain exclusive access to a BlockedOnException thread,
          we must call lockClosure() on the TSO on which it is blocked.
          Since the TSO might change underneath our feet, after we
          call lockClosure() we must check that

             (a) the closure we locked is actually a TSO
             (b) the original thread is still BlockedOnException,
             (c) the original thread is still blocked on the TSO we locked
          and (d) the target thread has not been relocated.

          We synchronise with threadStackOverflow() (which relocates
          threads) using lockClosure()/unlockClosure().
        */
        target2 = target->block_info.tso;

        info = lockClosure((StgClosure *)target2);
        if (info != &stg_TSO_info) {
            unlockClosure((StgClosure *)target2, info);
            goto retry;
        }
        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockTSO(target2);
            goto retry;
        }
        if (target2->what_next == ThreadRelocated) {
            target->block_info.tso = target2->_link;
            unlockTSO(target2);
            goto retry;
        }
        if (target->why_blocked != BlockedOnException
            || target->block_info.tso != target2) {
            unlockTSO(target2);
            goto retry;
        }

        /*
           Now we have exclusive rights to the target TSO...

           If it is blocking exceptions, add the source TSO to its
           blocked_exceptions queue.  Otherwise, raise the exception.
        */
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            lockTSO(target);
            blockedThrowTo(cap,source,target);
            unlockTSO(target2);
            *out = target;
            return THROWTO_BLOCKED;
        } else {
            removeThreadFromQueue(cap, &target2->blocked_exceptions, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockTSO(target2);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,source,target);
            *out = target;
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall:
    case BlockedOnCCall_NoUnblockExc:
        // I don't think it's possible to acquire ownership of a
        // BlockedOnCCall thread.  We just assume that the target
        // thread is blocking exceptions, and block on its
        // blocked_exceptions queue.
        lockTSO(target);
        if (target->why_blocked != BlockedOnCCall &&
            target->why_blocked != BlockedOnCCall_NoUnblockExc) {
            unlockTSO(target);
            goto retry;
        }
        blockedThrowTo(cap,source,target);
        *out = target;
        return THROWTO_BLOCKED;

#ifndef THREADED_RTS
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,source,target);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    default:
        barf("throwTo: unrecognised why_blocked value");
    }
    barf("throwTo");
}

// Block a TSO on another TSO's blocked_exceptions queue.
// Precondition: we hold an exclusive lock on the target TSO (this is
// complex to achieve as there's no single lock on a TSO; see
// throwTo()).
static void
blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target)
{
    debugTrace(DEBUG_sched, "throwTo: blocking on thread %lu", (unsigned long)target->id);
    setTSOLink(cap, source, target->blocked_exceptions);
    target->blocked_exceptions = source;
    dirty_TSO(cap,target); // we modified the blocked_exceptions queue

    source->block_info.tso = target;
    write_barrier(); // the write to block_info.tso *must* be visible if BlockedOnException is.
    source->why_blocked = BlockedOnException;
}


#ifdef THREADED_RTS
void
throwToReleaseTarget (void *tso)
{
    unlockTSO((StgTSO *)tso);
}
#endif

/* -----------------------------------------------------------------------------
   Waking up threads blocked in throwTo

   There are two ways to do this: maybePerformBlockedException() will
   perform the throwTo() for the thread at the head of the queue
   immediately, and leave the other threads on the queue.
   maybePerformBlockedException() also checks the TSO_BLOCKEX flag
   before raising an exception.

   awakenBlockedExceptionQueue() will wake up all the threads in the
   queue, but not perform any throwTo() immediately.  This might be
   more appropriate when the target thread is the one actually running
   (see Exception.cmm).

   Returns: non-zero if an exception was raised, zero otherwise.
   -------------------------------------------------------------------------- */
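
/* Sketch of the shape of a call site (an assumption modelled on the
 * description above; the real checks live in the scheduler and in
 * threadPaused()):
 *
 *     if (maybePerformBlockedException(cap, tso)) {
 *         // an exception was raised in tso, so re-examine
 *         // tso->what_next before running it
 *     }
 */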

int
maybePerformBlockedException (Capability *cap, StgTSO *tso)
{
    StgTSO *source;

    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        if (tso->blocked_exceptions != END_TSO_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);
            return 1;
        } else {
            return 0;
        }
    }

    if (tso->blocked_exceptions != END_TSO_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTrace(DEBUG_sched, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
    }

    if (tso->blocked_exceptions != END_TSO_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // Lock the TSO, this gives us exclusive access to the queue
        lockTSO(tso);

        // Check the queue again; it might have changed before we
        // locked it.
        if (tso->blocked_exceptions == END_TSO_QUEUE) {
            unlockTSO(tso);
            return 0;
        }

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
        source = tso->blocked_exceptions;
        performBlockedException(cap, source, tso);
        tso->blocked_exceptions = unblockOne_(cap, source,
                                              rtsFalse/*no migrate*/);
        unlockTSO(tso);
        return 1;
    }
    return 0;
}

// awakenBlockedExceptionQueue(): Just wake up the whole queue of
// blocked exceptions and let them try again.

void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    lockTSO(tso);
    awakenBlockedQueue(cap, tso->blocked_exceptions);
    tso->blocked_exceptions = END_TSO_QUEUE;
    unlockTSO(tso);
}

static void
performBlockedException (Capability *cap, StgTSO *source, StgTSO *target)
{
    StgClosure *exception;

    ASSERT(source->why_blocked == BlockedOnException);
    ASSERT(source->block_info.tso->id == target->id);
    ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
    ASSERT(((StgTSO *)source->sp[1])->id == target->id);
    // check ids not pointers, because the thread might be relocated
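    //
    // At this point the source thread's stack is expected to look
    // like this (cf. the ASSERTs above; the frame was pushed by
    // stg_block_throwto when the source blocked):
    //
    //     sp[0] : stg_block_throwto_info
    //     sp[1] : target TSO
    //     sp[2] : exception closure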

    exception = (StgClosure *)source->sp[2];
    throwToSingleThreaded(cap, target, exception);
    source->sp += 3;
}

/* -----------------------------------------------------------------------------
   Remove a thread from blocking queues.

   This is for use when we raise an exception in another thread, which
   may be blocked.

   Precondition: we have exclusive access to the TSO, via the same set
   of conditions as throwToSingleThreaded() (c.f.).
   -------------------------------------------------------------------------- */

static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here!  We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack.  We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
        removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
        goto done;

    case BlockedOnBlackHole:
        removeThreadFromQueue(cap, &blackhole_queue, tso);
        goto done;

    case BlockedOnException:
    {
        StgTSO *target = tso->block_info.tso;

        // NO: when called by threadPaused(), we probably have this
        // TSO already locked (WHITEHOLEd) because we just placed
        // ourselves on its queue.
        // ASSERT(get_itbl(target)->type == TSO);

        while (target->what_next == ThreadRelocated) {
            target = target->_link;
        }

        removeThreadFromQueue(cap, &target->blocked_exceptions, tso);
        goto done;
    }

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(cap, &sleeping_queue, tso);
        goto done;
#endif

    default:
        barf("removeFromQueues: %d", tso->why_blocked);
    }

done:
    unblockOne(cap, tso);
}

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked.  The possible blockages are MVARs and BLACKHOLE_BQs.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary.  When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations?  We create an
 * AP_STACK for every UpdateFrame on the stack.  Entering one of these
 * AP_STACKs pushes everything from the corresponding update frame
 * upwards onto the stack.  (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_STACK object.
 * Entering the next AP_STACK object pushes more onto the stack until we
 * reach the last AP_STACK object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.)
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack.  In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * ToDo: in THREADED_RTS mode, this function is only safe if either
 * (a) we hold all the Capabilities (eg. in GC, or if there is only
 * one Capability), or (b) we own the Capability that the TSO is
 * currently blocked on or on the run queue of.
 *
 * -------------------------------------------------------------------------- */
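
/* An illustrative sketch (not code from this file): a stack of the
 * form
 *
 *     fun | chunk1 | update frame u1 | chunk2 | update frame u2 | ...
 *
 * (innermost at the left) is frozen, roughly, into
 *
 *     ap1 = AP_STACK { fun = fun, payload = chunk1 }   // updates u1
 *     ap2 = AP_STACK { fun = ap1, payload = chunk2 }   // updates u2
 *
 * so that entering ap2 later rebuilds the stack exactly as it was.
 */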

static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    StgClosure *updatee;
    nat i;

    debugTrace(DEBUG_sched,
               "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:raisezh_fast.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
    {
        fprintCCS_stderr(tso->prof.CCCS);
    }
#endif

    // mark it dirty; we're about to change its stack.
    dirty_TSO(cap, tso);

    sp = tso->sp;

    // ASSUMES: the thread is not already complete or dead.  Upper
    // layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);

    if (stop_here != NULL) {
        updatee = stop_here->updatee;
    } else {
        updatee = NULL;
    }

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        sp++;
    } else {
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //    CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame.  This closure becomes the "current
        //    closure".  Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocateLocal(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun  = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //         );

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update.  We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            } else {
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                UPD_IND(((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
            }

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; // no need to bump frame
        }

        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);
            return;
        }

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
#ifdef PROFILING
            StgCatchFrame *cf = (StgCatchFrame *)frame;
#endif
            StgThunk *raise;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            tso->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));
            return;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
                tso->sp = frame;
                tso->what_next = ThreadRunGHC;
                return;
            }
            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // If we find one of these frames, we abort the current
            // transaction and propagate the exception.  In this case
            // (unlike ordinary exceptions) we do not care whether the
            // transaction is valid or not, because its possible
            // validity cannot have caused the exception and will not
            // be visible after the abort.

            {
                StgTRecHeader *trec = tso -> trec;
                StgTRecHeader *outer = stmGetEnclosingTRec(trec);
                debugTrace(DEBUG_stm,
                           "found atomically block delivering async exception");
                stmAbortTransaction(cap, trec);
                stmFreeAbortedTRec(cap, trec);
                tso -> trec = outer;
                break;
            }

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);
}