Allow throwTo() to be called without a source thread
rts/RaiseAsync.c
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Asynchronous exceptions
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "sm/Storage.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
#include "Schedule.h"
#include "Updates.h"
#include "STM.h"
#include "sm/Sanity.h"
#include "Profiling.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif

static void raiseAsync (Capability *cap,
                        StgTSO *tso,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgUpdateFrame *stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target);

static void performBlockedException (Capability *cap,
                                     StgTSO *source, StgTSO *target);

/* -----------------------------------------------------------------------------
   throwToSingleThreaded

   This version of throwTo is safe to use if and only if one of the
   following holds:

   - !THREADED_RTS

   - all the other threads in the system are stopped (eg. during GC).

   - we surely own the target TSO (eg. we just took it from the
     run queue of the current capability, or we are running it).

   It doesn't cater for blocking the source thread until the exception
   has been raised.
   -------------------------------------------------------------------------- */
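
/* For illustration, a hypothetical caller satisfying the third
 * condition above (exclusive ownership of the target TSO), modelled
 * on deleteThread() in Schedule.c, which kills threads at shutdown
 * while no other threads are running:
 *
 *    static void
 *    deleteThread (Capability *cap, StgTSO *tso)
 *    {
 *        // we have exclusive access to tso here, so this is safe:
 *        if (tso->why_blocked != BlockedOnCCall &&
 *            tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
 *            throwToSingleThreaded(cap, tso, NULL);
 *        }
 *    }
 *
 * Passing NULL as the exception strips the thread's stack without
 * running any handlers, as described for raiseAsync() below.
 */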

void
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
{
    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
}

void
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
}

void
suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
}

/* -----------------------------------------------------------------------------
   throwTo

   This function may be used to throw an exception from one thread to
   another, during the course of normal execution. This is a tricky
   task: the target thread might be running on another CPU, or it
   may be blocked and could be woken up at any point by another CPU.
   We have some delicate synchronisation to do.

   There is a completely safe fallback scheme: it is always possible
   to just block the source TSO on the target TSO's blocked_exceptions
   queue. This queue is locked using lockTSO()/unlockTSO(). It is
   checked at regular intervals: before and after running a thread
   (schedule() and threadPaused() respectively), and just before GC
   (scheduleDoGC()). Activating a thread on this queue should be done
   using maybePerformBlockedException(): this is done in the context
   of the target thread, so the exception can be raised eagerly.

   This fallback scheme works even if the target thread is complete or
   killed: scheduleDoGC() will discover the blocked thread before the
   target is GC'd.

   Blocking the source thread on the target thread's blocked_exceptions
   queue is also employed when the target thread is currently blocking
   exceptions (ie. inside Control.Exception.block).

   We could use the safe fallback scheme exclusively, but that
   wouldn't be ideal: most calls to throwTo would block immediately,
   possibly until the next GC, which might require the deadlock
   detection mechanism to kick in. So we try to provide promptness
   wherever possible.

   We can promptly deliver the exception if the target thread is:

   - runnable, on the same Capability as the source thread (because
     we own the run queue and therefore the target thread).

   - blocked, and we can obtain exclusive access to it. Obtaining
     exclusive access to the thread depends on how it is blocked.

   We must also be careful not to trip over threadStackOverflow(),
   which might be moving the TSO to enlarge its stack.
   lockTSO()/unlockTSO() are used here too.

   Returns:

   THROWTO_SUCCESS    exception was raised, ok to continue

   THROWTO_BLOCKED    exception was not raised; block the source
                      thread then call throwToReleaseTarget() when
                      the source thread is properly tidied away.

   -------------------------------------------------------------------------- */
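
/* A sketch of the calling protocol (illustrative only; the real
 * callers are the killThread# implementation in Exception.cmm and
 * the RTS itself, which passes source == NULL):
 *
 *    void *out = NULL;
 *    switch (throwTo(cap, source, target, exception, &out)) {
 *    case THROWTO_SUCCESS:
 *        break;  // exception delivered; the source can continue
 *    case THROWTO_BLOCKED:
 *        // the source is now on the target's blocked_exceptions
 *        // queue; tidy the source away (put it to sleep), then
 *        // release the lock still held on the target:
 *        throwToReleaseTarget(out);
 *        break;
 *    }
 */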

nat
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception (or NULL)
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception, // the exception closure
         /*[out]*/ void **out USED_IF_THREADS)
{
    StgWord status;

    ASSERT(target != END_TSO_QUEUE);

    // follow ThreadRelocated links in the target first
    while (target->what_next == ThreadRelocated) {
        target = target->_link;
        // No, it might be a WHITEHOLE:
        // ASSERT(get_itbl(target)->type == TSO);
    }

    if (source != NULL) {
        debugTrace(DEBUG_sched, "throwTo: from thread %lu to thread %lu",
                   (unsigned long)source->id, (unsigned long)target->id);
    } else {
        debugTrace(DEBUG_sched, "throwTo: from RTS to thread %lu",
                   (unsigned long)target->id);
    }

#ifdef DEBUG
    traceThreadStatus(DEBUG_sched, target);
#endif

    goto check_target;
retry:
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    ASSERT(target != END_TSO_QUEUE);

    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
        /* if status==NotBlocked, and target->cap == cap, then
           we own this TSO and can raise the exception.

           How do we establish this condition? Very carefully.

           Let
               P = (status == NotBlocked)
               Q = (target->cap == cap)

           Now, if P & Q are true, then the TSO is locked and owned by
           this capability. No other OS thread can steal it.

           If P==0 and Q==1: the TSO is blocked, but attached to this
           capability, and it can be stolen by another capability.

           If P==1 and Q==0: the TSO is runnable on another
           capability. At any time, the TSO may change from runnable
           to blocked and vice versa, while it remains owned by
           another capability.

           Suppose we test like this:

              p = P
              q = Q
              if (p && q) ...

           this is defeated by another capability stealing a blocked
           TSO from us to wake it up (Schedule.c:unblockOne()). The
           other thread is doing

              Q = 0
              P = 1

           assuming arbitrary reordering, we could see this
           interleaving:

              start: P==0 && Q==1
              P = 1
              p = P
              q = Q
              Q = 0
              if (p && q) ...

           so we need a memory barrier:

              p = P
              mb()
              q = Q
              if (p && q) ...

           this avoids the problematic case. There are other cases
           to consider, but this is the tricky one.

           Note that we must be sure that unblockOne() does the
           writes in the correct order: Q before P. The memory
           barrier ensures that if we have seen the write to P, we
           have also seen the write to Q.
        */
    {
        Capability *target_cap;

        write_barrier();
        target_cap = target->cap;
        if (target_cap == cap && (target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            // Otherwise, just block on the blocked_exceptions queue
            // of the target thread. The queue will get looked at
            // soon enough: it is checked before and after running a
            // thread, and during GC.
            lockTSO(target);

            // Avoid race with threadStackOverflow, which may have
            // just moved this TSO.
            if (target->what_next == ThreadRelocated) {
                unlockTSO(target);
                target = target->_link;
                goto retry;
            }
            // check again for ThreadComplete and ThreadKilled. This
            // cooperates with scheduleHandleThreadFinished to ensure
            // that we never miss any threads that are throwing an
            // exception to a thread in the process of terminating.
            if (target->what_next == ThreadComplete
                || target->what_next == ThreadKilled) {
                unlockTSO(target);
                return THROWTO_SUCCESS;
            }
            blockedThrowTo(cap,source,target);
            *out = target;
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMVar:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure. In the threaded RTS it does.
        switch (get_itbl(mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockClosure((StgClosure *)mvar,info);
            goto retry;
        }
        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if (target->why_blocked != BlockedOnMVar
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            lockClosure((StgClosure *)target);
            blockedThrowTo(cap,source,target);
            unlockClosure((StgClosure *)mvar, info);
            *out = target;
            return THROWTO_BLOCKED; // caller releases TSO
        } else {
            removeThreadFromMVarQueue(cap, mvar, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        ACQUIRE_LOCK(&sched_mutex);
        // double checking the status after the memory barrier:
        if (target->why_blocked != BlockedOnBlackHole) {
            RELEASE_LOCK(&sched_mutex);
            goto retry;
        }

        if (target->flags & TSO_BLOCKEX) {
            lockTSO(target);
            blockedThrowTo(cap,source,target);
            RELEASE_LOCK(&sched_mutex);
            *out = target;
            return THROWTO_BLOCKED; // caller releases TSO
        } else {
            removeThreadFromQueue(cap, &blackhole_queue, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            RELEASE_LOCK(&sched_mutex);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnException:
    {
        StgTSO *target2;
        StgInfoTable *info;

        /*
          To obtain exclusive access to a BlockedOnException thread,
          we must call lockClosure() on the TSO on which it is blocked.
          Since the TSO might change underneath our feet, after we
          call lockClosure() we must check that

             (a) the closure we locked is actually a TSO
             (b) the original thread is still BlockedOnException,
             (c) the original thread is still blocked on the TSO we locked
          and (d) the target thread has not been relocated.

          We synchronise with threadStackOverflow() (which relocates
          threads) using lockClosure()/unlockClosure().
        */
        target2 = target->block_info.tso;

        info = lockClosure((StgClosure *)target2);
        if (info != &stg_TSO_info) {
            unlockClosure((StgClosure *)target2, info);
            goto retry;
        }
        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockTSO(target2);
            goto retry;
        }
        if (target2->what_next == ThreadRelocated) {
            target->block_info.tso = target2->_link;
            unlockTSO(target2);
            goto retry;
        }
        if (target->why_blocked != BlockedOnException
            || target->block_info.tso != target2) {
            unlockTSO(target2);
            goto retry;
        }

        /*
          Now we have exclusive rights to the target TSO...

          If it is blocking exceptions, add the source TSO to its
          blocked_exceptions queue. Otherwise, raise the exception.
        */
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            lockTSO(target);
            blockedThrowTo(cap,source,target);
            unlockTSO(target2);
            *out = target;
            return THROWTO_BLOCKED;
        } else {
            removeThreadFromQueue(cap, &target2->blocked_exceptions, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockTSO(target2);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,source,target);
            *out = target;
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall:
    case BlockedOnCCall_NoUnblockExc:
        // I don't think it's possible to acquire ownership of a
        // BlockedOnCCall thread. We just assume that the target
        // thread is blocking exceptions, and block on its
        // blocked_exceptions queue.
        lockTSO(target);
        if (target->why_blocked != BlockedOnCCall &&
            target->why_blocked != BlockedOnCCall_NoUnblockExc) {
            unlockTSO(target);
            goto retry;
        }
        blockedThrowTo(cap,source,target);
        *out = target;
        return THROWTO_BLOCKED;

#ifndef THREADED_RTS
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,source,target);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    default:
        barf("throwTo: unrecognised why_blocked value");
    }
    barf("throwTo");
}

// Block a TSO on another TSO's blocked_exceptions queue.
// Precondition: we hold an exclusive lock on the target TSO (this is
// complex to achieve as there's no single lock on a TSO; see
// throwTo()).
static void
blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target)
{
    if (source != NULL) {
        debugTrace(DEBUG_sched, "throwTo: blocking on thread %lu", (unsigned long)target->id);
        setTSOLink(cap, source, target->blocked_exceptions);
        target->blocked_exceptions = source;
        dirty_TSO(cap,target); // we modified the blocked_exceptions queue

        source->block_info.tso = target;
        write_barrier(); // block_info.tso *must* be visible if BlockedOnException is.
        source->why_blocked = BlockedOnException;
    }
}

#ifdef THREADED_RTS
void
throwToReleaseTarget (void *tso)
{
    unlockTSO((StgTSO *)tso);
}
#endif

/* -----------------------------------------------------------------------------
   Waking up threads blocked in throwTo

   There are two ways to do this: maybePerformBlockedException() will
   perform the throwTo() for the thread at the head of the queue
   immediately, and leave the other threads on the queue.
   maybePerformBlockedException() also checks the TSO_BLOCKEX flag
   before raising an exception.

   awakenBlockedExceptionQueue() will wake up all the threads in the
   queue, but not perform any throwTo() immediately. This might be
   more appropriate when the target thread is the one actually running
   (see Exception.cmm).

   Returns: non-zero if an exception was raised, zero otherwise.
   -------------------------------------------------------------------------- */
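
/* Illustrative sketch of the first way (the real call sites are in
 * Schedule.c, before and after running a thread):
 *
 *    // deliver at most one pending asynchronous exception to tso
 *    // before handing it the CPU:
 *    if (maybePerformBlockedException(cap, tso)) {
 *        // an exception was raised: tso's stack has been rewritten
 *        // so that it runs the handler (or the thread is now dead),
 *        // and the thrower at the head of the queue has been woken.
 *    }
 */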

int
maybePerformBlockedException (Capability *cap, StgTSO *tso)
{
    StgTSO *source;

    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        if (tso->blocked_exceptions != END_TSO_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);
            return 1;
        } else {
            return 0;
        }
    }

    if (tso->blocked_exceptions != END_TSO_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTrace(DEBUG_sched, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
    }

    if (tso->blocked_exceptions != END_TSO_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // Lock the TSO, this gives us exclusive access to the queue
        lockTSO(tso);

        // Check the queue again; it might have changed before we
        // locked it.
        if (tso->blocked_exceptions == END_TSO_QUEUE) {
            unlockTSO(tso);
            return 0;
        }

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
        source = tso->blocked_exceptions;
        performBlockedException(cap, source, tso);
        tso->blocked_exceptions = unblockOne_(cap, source,
                                              rtsFalse/*no migrate*/);
        unlockTSO(tso);
        return 1;
    }
    return 0;
}

// awakenBlockedExceptionQueue(): Just wake up the whole queue of
// blocked exceptions and let them try again.

void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    lockTSO(tso);
    awakenBlockedQueue(cap, tso->blocked_exceptions);
    tso->blocked_exceptions = END_TSO_QUEUE;
    unlockTSO(tso);
}

static void
performBlockedException (Capability *cap, StgTSO *source, StgTSO *target)
{
    StgClosure *exception;

    ASSERT(source->why_blocked == BlockedOnException);
    ASSERT(source->block_info.tso->id == target->id);
    ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
    ASSERT(((StgTSO *)source->sp[1])->id == target->id);
    // check ids not pointers, because the thread might be relocated

    exception = (StgClosure *)source->sp[2];
    throwToSingleThreaded(cap, target, exception);
    source->sp += 3;
}

/* -----------------------------------------------------------------------------
   Remove a thread from blocking queues.

   This is for use when we raise an exception in another thread, which
   may be blocked.

   Precondition: we have exclusive access to the TSO, via the same set
   of conditions as throwToSingleThreaded() (c.f.).
   -------------------------------------------------------------------------- */

static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here! We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack. We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
        removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
        goto done;

    case BlockedOnBlackHole:
        removeThreadFromQueue(cap, &blackhole_queue, tso);
        goto done;

    case BlockedOnException:
    {
        StgTSO *target = tso->block_info.tso;

        // NO: when called by threadPaused(), we probably have this
        // TSO already locked (WHITEHOLEd) because we just placed
        // ourselves on its queue.
        // ASSERT(get_itbl(target)->type == TSO);

        while (target->what_next == ThreadRelocated) {
            target = target->_link;
        }

        removeThreadFromQueue(cap, &target->blocked_exceptions, tso);
        goto done;
    }

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(cap, &sleeping_queue, tso);
        goto done;
#endif

    default:
        barf("removeFromQueues: %d", tso->why_blocked);
    }

done:
    unblockOne(cap, tso);
}

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary. When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations? We create an
 * AP_STACK for every UpdateFrame on the stack. Entering one of these
 * AP_STACKs pushes everything from the corresponding update frame
 * upwards onto the stack. (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_STACK object.
 * Entering the next AP_STACK object pushes more onto the stack until we
 * reach the last AP_STACK object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.)
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack. In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * ToDo: in THREADED_RTS mode, this function is only safe if either
 * (a) we hold all the Capabilities (eg. in GC, or if there is only
 * one Capability), or (b) we own the Capability that the TSO is
 * currently blocked on or on the run queue of.
 *
 * -------------------------------------------------------------------------- */
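
/* For example (illustrative only): suppose the stack looks like this
 * when the exception arrives, growing downwards:
 *
 *        ...
 *        UPDATE_FRAME (u2)
 *        frames f3, f4
 *        UPDATE_FRAME (u1)
 *        frames f1, f2
 *        closure c          <- tso->sp
 *
 * We build an AP_STACK a1 capturing c and f1,f2, and update u1's
 * updatee with a1; then an AP_STACK a2 capturing a1 and f3,f4, and
 * update u2's updatee with a2; and so on up the stack, until we hit
 * a CATCH_FRAME (push the handler applied to the exception and stop)
 * or a STOP_FRAME (strip everything and mark the thread killed).
 */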

static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    StgClosure *updatee;
    nat i;

    debugTrace(DEBUG_sched,
               "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:stg_raisezh.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
    {
        fprintCCS_stderr(tso->prof.CCCS);
    }
#endif

    // mark it dirty; we're about to change its stack.
    dirty_TSO(cap, tso);

    sp = tso->sp;

    // ASSUMES: the thread is not already complete or dead. Upper
    // layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);

    if (stop_here != NULL) {
        updatee = stop_here->updatee;
    } else {
        updatee = NULL;
    }

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        sp++;
    } else {
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //    CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame. This closure becomes the "current
        //    closure". Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun  = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //         );

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update. We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            } else {
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                UPD_IND(((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
            }

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; // no need to bump frame
        }

        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);
            return;
        }

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
#ifdef PROFILING
            StgCatchFrame *cf = (StgCatchFrame *)frame;
#endif
            StgThunk *raise;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            tso->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));
            return;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
                tso->sp = frame - 2;
                // The ATOMICALLY_FRAME expects to be returned a
                // result from the transaction, which it stores in the
                // stack frame. Hence we arrange to return a dummy
                // result, so that the GC doesn't get upset (#3578).
                // Perhaps a better way would be to have a different
                // ATOMICALLY_FRAME instance for condemned
                // transactions, but I don't fully understand the
                // interaction with STM invariants.
                tso->sp[1] = (W_)&stg_NO_TREC_closure;
                tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
                tso->what_next = ThreadRunGHC;
                return;
            }
            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // If we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception. In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.

        {
            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = trec -> enclosing_trec;
            debugTrace(DEBUG_stm,
                       "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);
            tso -> trec = outer;
            break;
        }

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);
}