/* Use message-passing to implement throwTo in the RTS
   [ghc.git] / rts / RaiseAsync.c */
1 /* ---------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2006
4 *
5 * Asynchronous exceptions
6 *
7 * --------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11
12 #include "sm/Storage.h"
13 #include "Threads.h"
14 #include "Trace.h"
15 #include "RaiseAsync.h"
16 #include "Schedule.h"
17 #include "Updates.h"
18 #include "STM.h"
19 #include "sm/Sanity.h"
20 #include "Profiling.h"
21 #if defined(mingw32_HOST_OS)
22 #include "win32/IOManager.h"
23 #endif
24
25 static void raiseAsync (Capability *cap,
26 StgTSO *tso,
27 StgClosure *exception,
28 rtsBool stop_at_atomically,
29 StgUpdateFrame *stop_here);
30
31 static void removeFromQueues(Capability *cap, StgTSO *tso);
32
33 static void blockedThrowTo (Capability *cap,
34 StgTSO *target, MessageThrowTo *msg);
35
36 static void throwToSendMsg (Capability *cap USED_IF_THREADS,
37 Capability *target_cap USED_IF_THREADS,
38 MessageThrowTo *msg USED_IF_THREADS);
39
40 static void performBlockedException (Capability *cap, MessageThrowTo *msg);
41
42 /* -----------------------------------------------------------------------------
43 throwToSingleThreaded
44
45 This version of throwTo is safe to use if and only if one of the
46 following holds:
47
48 - !THREADED_RTS
49
50 - all the other threads in the system are stopped (eg. during GC).
51
52 - we surely own the target TSO (eg. we just took it from the
53 run queue of the current capability, or we are running it).
54
55 It doesn't cater for blocking the source thread until the exception
56 has been raised.
57 -------------------------------------------------------------------------- */
58
59 void
60 throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
61 {
62 throwToSingleThreaded_(cap, tso, exception, rtsFalse);
63 }
64
65 void
66 throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
67 rtsBool stop_at_atomically)
68 {
69 // Thread already dead?
70 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
71 return;
72 }
73
74 // Remove it from any blocking queues
75 removeFromQueues(cap,tso);
76
77 raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
78 }
79
80 void
81 suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
82 {
83 // Thread already dead?
84 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
85 return;
86 }
87
88 // Remove it from any blocking queues
89 removeFromQueues(cap,tso);
90
91 raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
92 }
93
94 /* -----------------------------------------------------------------------------
95 throwTo
96
97 This function may be used to throw an exception from one thread to
98 another, during the course of normal execution. This is a tricky
99 task: the target thread might be running on another CPU, or it
100 may be blocked and could be woken up at any point by another CPU.
101 We have some delicate synchronisation to do.
102
103 The underlying scheme when multiple Capabilities are in use is
104 message passing: when the target of a throwTo is on another
105 Capability, we send a message (a MessageThrowTo closure) to that
106 Capability.
107
108 If the throwTo needs to block because the target TSO is masking
109 exceptions (the TSO_BLOCKEX flag), then the message is placed on
110 the blocked_exceptions queue attached to the target TSO. When the
111 target TSO enters the unmasked state again, it must check the
112 queue. The blocked_exceptions queue is not locked; only the
113 Capability owning the TSO may modify it.
114
115 To make things simpler for throwTo, we always create the message
116 first before deciding what to do. The message may get sent, or it
117 may get attached to a TSO's blocked_exceptions queue, or the
118 exception may get thrown immediately and the message dropped,
119 depending on the current state of the target.
120
121 Currently we send a message if the target belongs to another
122 Capability, and it is
123
124 - NotBlocked, BlockedOnMsgWakeup, BlockedOnMsgThrowTo,
125 BlockedOnCCall
126
127 - or it is masking exceptions (TSO_BLOCKEX)
128
129 Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
130 BlockedOnBlackHole then we acquire ownership of the TSO by locking
131 its parent container (e.g. the MVar) and then raise the exception.
132 We might change these cases to be more message-passing-like in the
133 future.
134
135 Returns:
136
137 NULL exception was raised, ok to continue
138
139 MessageThrowTo * exception was not raised; the source TSO
140 should now put itself in the state
141 BlockedOnMsgThrowTo, and when it is ready
142                           it should unlock the message using
143 unlockClosure(msg, &stg_MSG_THROWTO_info);
144 If it decides not to raise the exception after
145 all, it can revoke it safely with
146 unlockClosure(msg, &stg_IND_info);
147
148 -------------------------------------------------------------------------- */
149
150 MessageThrowTo *
151 throwTo (Capability *cap, // the Capability we hold
152 StgTSO *source, // the TSO sending the exception (or NULL)
153 StgTSO *target, // the TSO receiving the exception
154 StgClosure *exception) // the exception closure
155 {
156 MessageThrowTo *msg;
157
158 msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
159 // message starts locked; the caller has to unlock it when it is
160 // ready.
161 msg->header.info = &stg_WHITEHOLE_info;
162 msg->source = source;
163 msg->target = target;
164 msg->exception = exception;
165
166 switch (throwToMsg(cap, msg))
167 {
168 case THROWTO_SUCCESS:
169 return NULL;
170 case THROWTO_BLOCKED:
171 default:
172 return msg;
173 }
174 }
175
176
177 nat
178 throwToMsg (Capability *cap, MessageThrowTo *msg)
179 {
180 StgWord status;
181 StgTSO *target = msg->target;
182
183 ASSERT(target != END_TSO_QUEUE);
184
185 // follow ThreadRelocated links in the target first
186 while (target->what_next == ThreadRelocated) {
187 target = target->_link;
188 // No, it might be a WHITEHOLE:
189 // ASSERT(get_itbl(target)->type == TSO);
190 }
191
192 debugTraceCap(DEBUG_sched, cap,
193 "throwTo: from thread %lu to thread %lu",
194 (unsigned long)msg->source->id,
195 (unsigned long)msg->target->id);
196
197 #ifdef DEBUG
198 traceThreadStatus(DEBUG_sched, target);
199 #endif
200
201 goto check_target;
202 retry:
203 write_barrier();
204 debugTrace(DEBUG_sched, "throwTo: retrying...");
205
206 check_target:
207 ASSERT(target != END_TSO_QUEUE);
208
209 // Thread already dead?
210 if (target->what_next == ThreadComplete
211 || target->what_next == ThreadKilled) {
212 return THROWTO_SUCCESS;
213 }
214
215 status = target->why_blocked;
216
217 switch (status) {
218 case NotBlocked:
219 case BlockedOnMsgWakeup:
220 /* if status==NotBlocked, and target->cap == cap, then
221 we own this TSO and can raise the exception.
222
223 How do we establish this condition? Very carefully.
224
225 Let
226 P = (status == NotBlocked)
227 Q = (tso->cap == cap)
228
229 Now, if P & Q are true, then the TSO is locked and owned by
230 this capability. No other OS thread can steal it.
231
232 If P==0 and Q==1: the TSO is blocked, but attached to this
233 capabilty, and it can be stolen by another capability.
234
235 If P==1 and Q==0: the TSO is runnable on another
236 capability. At any time, the TSO may change from runnable
237 to blocked and vice versa, while it remains owned by
238 another capability.
239
240 Suppose we test like this:
241
242 p = P
243 q = Q
244 if (p && q) ...
245
246 this is defeated by another capability stealing a blocked
247 TSO from us to wake it up (Schedule.c:unblockOne()). The
248 other thread is doing
249
250 Q = 0
251 P = 1
252
253 assuming arbitrary reordering, we could see this
254 interleaving:
255
256 start: P==0 && Q==1
257 P = 1
258 p = P
259 q = Q
260 Q = 0
261 if (p && q) ...
262
263 so we need a memory barrier:
264
265 p = P
266 mb()
267 q = Q
268 if (p && q) ...
269
270 this avoids the problematic case. There are other cases
271 to consider, but this is the tricky one.
272
273 Note that we must be sure that unblockOne() does the
274 writes in the correct order: Q before P. The memory
275 barrier ensures that if we have seen the write to P, we
276 have also seen the write to Q.
277 */
278 {
279 Capability *target_cap;
280
281 write_barrier();
282 target_cap = target->cap;
283 if (target_cap != cap) {
284 throwToSendMsg(cap, target_cap, msg);
285 return THROWTO_BLOCKED;
286 } else {
287 if ((target->flags & TSO_BLOCKEX) == 0) {
288 // It's on our run queue and not blocking exceptions
289 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
290 return THROWTO_SUCCESS;
291 } else {
292 blockedThrowTo(cap,target,msg);
293 return THROWTO_BLOCKED;
294 }
295 }
296 }
297
298 case BlockedOnMsgThrowTo:
299 {
300 Capability *target_cap;
301 const StgInfoTable *i;
302 MessageThrowTo *m;
303
304 m = target->block_info.throwto;
305
306 // target is local to this cap, but has sent a throwto
307 // message to another cap.
308 //
309 // The source message is locked. We need to revoke the
310 // target's message so that we can raise the exception, so
311 // we attempt to lock it.
312
313 // There's a possibility of a deadlock if two threads are both
314 // trying to throwTo each other (or more generally, a cycle of
315 // threads). To break the symmetry we compare the addresses
316 // of the MessageThrowTo objects, and the one for which m <
317 // msg gets to spin, while the other can only try to lock
318 // once, but must then back off and unlock both before trying
319 // again.
320 if (m < msg) {
321 i = lockClosure((StgClosure *)m);
322 } else {
323 i = tryLockClosure((StgClosure *)m);
324 if (i == NULL) {
325 // debugBelch("collision\n");
326 throwToSendMsg(cap, target->cap, msg);
327 return THROWTO_BLOCKED;
328 }
329 }
330
331 if (i != &stg_MSG_THROWTO_info) {
332 // if it's an IND, this TSO has been woken up by another Cap
333 unlockClosure((StgClosure*)m, i);
334 goto retry;
335 }
336
337 target_cap = target->cap;
338 if (target_cap != cap) {
339 unlockClosure((StgClosure*)m, i);
340 throwToSendMsg(cap, target_cap, msg);
341 return THROWTO_BLOCKED;
342 }
343
344 if ((target->flags & TSO_BLOCKEX) &&
345 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
346 unlockClosure((StgClosure*)m, i);
347 blockedThrowTo(cap,target,msg);
348 return THROWTO_BLOCKED;
349 }
350
351 // nobody else can wake up this TSO after we claim the message
352 unlockClosure((StgClosure*)m, &stg_IND_info);
353
354 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
355 unblockOne(cap, target);
356 return THROWTO_SUCCESS;
357 }
358
359 case BlockedOnMVar:
360 {
361 /*
362 To establish ownership of this TSO, we need to acquire a
363 lock on the MVar that it is blocked on.
364 */
365 StgMVar *mvar;
366 StgInfoTable *info USED_IF_THREADS;
367
368 mvar = (StgMVar *)target->block_info.closure;
369
370 // ASSUMPTION: tso->block_info must always point to a
371 // closure. In the threaded RTS it does.
372 switch (get_itbl(mvar)->type) {
373 case MVAR_CLEAN:
374 case MVAR_DIRTY:
375 break;
376 default:
377 goto retry;
378 }
379
380 info = lockClosure((StgClosure *)mvar);
381
382 if (target->what_next == ThreadRelocated) {
383 target = target->_link;
384 unlockClosure((StgClosure *)mvar,info);
385 goto retry;
386 }
387 // we have the MVar, let's check whether the thread
388 // is still blocked on the same MVar.
389 if (target->why_blocked != BlockedOnMVar
390 || (StgMVar *)target->block_info.closure != mvar) {
391 unlockClosure((StgClosure *)mvar, info);
392 goto retry;
393 }
394
395 if ((target->flags & TSO_BLOCKEX) &&
396 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
397 Capability *target_cap = target->cap;
398 if (target->cap != cap) {
399 throwToSendMsg(cap,target_cap,msg);
400 } else {
401 blockedThrowTo(cap,target,msg);
402 }
403 unlockClosure((StgClosure *)mvar, info);
404 return THROWTO_BLOCKED;
405 } else {
406 removeThreadFromMVarQueue(cap, mvar, target);
407 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
408 unblockOne(cap, target);
409 unlockClosure((StgClosure *)mvar, info);
410 return THROWTO_SUCCESS;
411 }
412 }
413
414 case BlockedOnBlackHole:
415 {
416 ACQUIRE_LOCK(&sched_mutex);
417 // double checking the status after the memory barrier:
418 if (target->why_blocked != BlockedOnBlackHole) {
419 RELEASE_LOCK(&sched_mutex);
420 goto retry;
421 }
422
423 if (target->flags & TSO_BLOCKEX) {
424 Capability *target_cap = target->cap;
425 if (target->cap != cap) {
426 throwToSendMsg(cap,target_cap,msg);
427 } else {
428 blockedThrowTo(cap,target,msg);
429 }
430 RELEASE_LOCK(&sched_mutex);
431 return THROWTO_BLOCKED; // caller releases lock
432 } else {
433 removeThreadFromQueue(cap, &blackhole_queue, target);
434 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
435 unblockOne(cap, target);
436 RELEASE_LOCK(&sched_mutex);
437 return THROWTO_SUCCESS;
438 }
439 }
440
441 case BlockedOnSTM:
442 lockTSO(target);
443 // Unblocking BlockedOnSTM threads requires the TSO to be
444 // locked; see STM.c:unpark_tso().
445 if (target->why_blocked != BlockedOnSTM) {
446 unlockTSO(target);
447 goto retry;
448 }
449 if ((target->flags & TSO_BLOCKEX) &&
450 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
451 Capability *target_cap = target->cap;
452 if (target->cap != cap) {
453 throwToSendMsg(cap,target_cap,msg);
454 } else {
455 blockedThrowTo(cap,target,msg);
456 }
457 unlockTSO(target);
458 return THROWTO_BLOCKED;
459 } else {
460 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
461 unblockOne(cap, target);
462 unlockTSO(target);
463 return THROWTO_SUCCESS;
464 }
465
466 case BlockedOnCCall:
467 case BlockedOnCCall_NoUnblockExc:
468 {
469 Capability *target_cap;
470
471 target_cap = target->cap;
472 if (target_cap != cap) {
473 throwToSendMsg(cap, target_cap, msg);
474 return THROWTO_BLOCKED;
475 }
476
477 blockedThrowTo(cap,target,msg);
478 return THROWTO_BLOCKED;
479 }
480
481 #ifndef THREADEDED_RTS
482 case BlockedOnRead:
483 case BlockedOnWrite:
484 case BlockedOnDelay:
485 #if defined(mingw32_HOST_OS)
486 case BlockedOnDoProc:
487 #endif
488 if ((target->flags & TSO_BLOCKEX) &&
489 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
490 blockedThrowTo(cap,target,msg);
491 return THROWTO_BLOCKED;
492 } else {
493 removeFromQueues(cap,target);
494 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
495 return THROWTO_SUCCESS;
496 }
497 #endif
498
499 default:
500 barf("throwTo: unrecognised why_blocked value");
501 }
502 barf("throwTo");
503 }
504
505 static void
506 throwToSendMsg (Capability *cap STG_UNUSED,
507 Capability *target_cap USED_IF_THREADS,
508 MessageThrowTo *msg USED_IF_THREADS)
509
510 {
511 #ifdef THREADED_RTS
512 debugTrace(DEBUG_sched, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);
513
514 sendMessage(target_cap, (Message*)msg);
515 #endif
516 }
517
518 // Block a throwTo message on the target TSO's blocked_exceptions
519 // queue. The current Capability must own the target TSO in order to
520 // modify the blocked_exceptions queue.
521 static void
522 blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
523 {
524 debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
525 (unsigned long)target->id);
526
527 ASSERT(target->cap == cap);
528
529 msg->link = (Message*)target->blocked_exceptions;
530 target->blocked_exceptions = msg;
531 dirty_TSO(cap,target); // we modified the blocked_exceptions queue
532 }
533
534 /* -----------------------------------------------------------------------------
535 Waking up threads blocked in throwTo
536
537 There are two ways to do this: maybePerformBlockedException() will
538 perform the throwTo() for the thread at the head of the queue
539 immediately, and leave the other threads on the queue.
540 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
541 before raising an exception.
542
543 awakenBlockedExceptionQueue() will wake up all the threads in the
544 queue, but not perform any throwTo() immediately. This might be
545 more appropriate when the target thread is the one actually running
546 (see Exception.cmm).
547
548 Returns: non-zero if an exception was raised, zero otherwise.
549 -------------------------------------------------------------------------- */
550
551 int
552 maybePerformBlockedException (Capability *cap, StgTSO *tso)
553 {
554 MessageThrowTo *msg;
555 const StgInfoTable *i;
556
557 if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
558 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
559 awakenBlockedExceptionQueue(cap,tso);
560 return 1;
561 } else {
562 return 0;
563 }
564 }
565
566 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
567 (tso->flags & TSO_BLOCKEX) != 0) {
568 debugTrace(DEBUG_sched, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
569 }
570
571 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
572 && ((tso->flags & TSO_BLOCKEX) == 0
573 || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {
574
575 // We unblock just the first thread on the queue, and perform
576 // its throw immediately.
577 loop:
578 msg = tso->blocked_exceptions;
579 if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
580 i = lockClosure((StgClosure*)msg);
581 tso->blocked_exceptions = (MessageThrowTo*)msg->link;
582 if (i == &stg_IND_info) {
583 unlockClosure((StgClosure*)msg,i);
584 goto loop;
585 }
586
587 performBlockedException(cap, msg);
588 unblockOne_(cap, msg->source, rtsFalse/*no migrate*/);
589 unlockClosure((StgClosure*)msg,&stg_IND_info);
590 return 1;
591 }
592 return 0;
593 }
594
595 // awakenBlockedExceptionQueue(): Just wake up the whole queue of
596 // blocked exceptions and let them try again.
597
598 void
599 awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
600 {
601 MessageThrowTo *msg;
602 const StgInfoTable *i;
603
604 for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
605 msg = (MessageThrowTo*)msg->link) {
606 i = lockClosure((StgClosure *)msg);
607 if (i != &stg_IND_info) {
608 unblockOne_(cap, msg->source, rtsFalse/*no migrate*/);
609 }
610 unlockClosure((StgClosure *)msg,i);
611 }
612 tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
613 }
614
615 static void
616 performBlockedException (Capability *cap, MessageThrowTo *msg)
617 {
618 StgTSO *source;
619
620 source = msg->source;
621
622 ASSERT(source->why_blocked == BlockedOnMsgThrowTo);
623 ASSERT(source->block_info.closure == (StgClosure *)msg);
624 ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
625 ASSERT(((StgTSO *)source->sp[1])->id == msg->target->id);
626 // check ids not pointers, because the thread might be relocated
627
628 throwToSingleThreaded(cap, msg->target, msg->exception);
629 source->sp += 3;
630 }
631
632 /* -----------------------------------------------------------------------------
633 Remove a thread from blocking queues.
634
635 This is for use when we raise an exception in another thread, which
636 may be blocked.
637
638 Precondition: we have exclusive access to the TSO, via the same set
639 of conditions as throwToSingleThreaded() (c.f.).
640 -------------------------------------------------------------------------- */
641
642 static void
643 removeFromQueues(Capability *cap, StgTSO *tso)
644 {
645 switch (tso->why_blocked) {
646
647 case NotBlocked:
648 return;
649
650 case BlockedOnSTM:
651 // Be careful: nothing to do here! We tell the scheduler that the
652 // thread is runnable and we leave it to the stack-walking code to
653 // abort the transaction while unwinding the stack. We should
654 // perhaps have a debugging test to make sure that this really
655 // happens and that the 'zombie' transaction does not get
656 // committed.
657 goto done;
658
659 case BlockedOnMVar:
660 removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
661 goto done;
662
663 case BlockedOnBlackHole:
664 removeThreadFromQueue(cap, &blackhole_queue, tso);
665 goto done;
666
667 case BlockedOnMsgWakeup:
668 {
669 // kill the message, atomically:
670 tso->block_info.wakeup->header.info = &stg_IND_info;
671 break;
672 }
673
674 case BlockedOnMsgThrowTo:
675 {
676 MessageThrowTo *m = tso->block_info.throwto;
677 // The message is locked by us, unless we got here via
678 // deleteAllThreads(), in which case we own all the
679 // capabilities.
680 // ASSERT(m->header.info == &stg_WHITEHOLE_info);
681
682 // unlock and revoke it at the same time
683 unlockClosure((StgClosure*)m,&stg_IND_info);
684 break;
685 }
686
687 #if !defined(THREADED_RTS)
688 case BlockedOnRead:
689 case BlockedOnWrite:
690 #if defined(mingw32_HOST_OS)
691 case BlockedOnDoProc:
692 #endif
693 removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
694 #if defined(mingw32_HOST_OS)
695 /* (Cooperatively) signal that the worker thread should abort
696 * the request.
697 */
698 abandonWorkRequest(tso->block_info.async_result->reqID);
699 #endif
700 goto done;
701
702 case BlockedOnDelay:
703 removeThreadFromQueue(cap, &sleeping_queue, tso);
704 goto done;
705 #endif
706
707 default:
708 barf("removeFromQueues: %d", tso->why_blocked);
709 }
710
711 done:
712 unblockOne(cap, tso);
713 }
714
715 /* -----------------------------------------------------------------------------
716 * raiseAsync()
717 *
718 * The following function implements the magic for raising an
719 * asynchronous exception in an existing thread.
720 *
721 * We first remove the thread from any queue on which it might be
722 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
723 *
724 * We strip the stack down to the innermost CATCH_FRAME, building
725 * thunks in the heap for all the active computations, so they can
726 * be restarted if necessary. When we reach a CATCH_FRAME, we build
727 * an application of the handler to the exception, and push it on
728 * the top of the stack.
729 *
730 * How exactly do we save all the active computations? We create an
731 * AP_STACK for every UpdateFrame on the stack. Entering one of these
732 * AP_STACKs pushes everything from the corresponding update frame
733 * upwards onto the stack. (Actually, it pushes everything up to the
734 * next update frame plus a pointer to the next AP_STACK object.
735 * Entering the next AP_STACK object pushes more onto the stack until we
736 * reach the last AP_STACK object - at which point the stack should look
737 * exactly as it did when we killed the TSO and we can continue
738 * execution by entering the closure on top of the stack.
739 *
740 * We can also kill a thread entirely - this happens if either (a) the
741 * exception passed to raiseAsync is NULL, or (b) there's no
742 * CATCH_FRAME on the stack. In either case, we strip the entire
743 * stack and replace the thread with a zombie.
744 *
745 * ToDo: in THREADED_RTS mode, this function is only safe if either
746 * (a) we hold all the Capabilities (eg. in GC, or if there is only
747 * one Capability), or (b) we own the Capability that the TSO is
748 * currently blocked on or on the run queue of.
749 *
750 * -------------------------------------------------------------------------- */
751
752 static void
753 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
754 rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
755 {
756 StgRetInfoTable *info;
757 StgPtr sp, frame;
758 StgClosure *updatee;
759 nat i;
760
761 debugTrace(DEBUG_sched,
762 "raising exception in thread %ld.", (long)tso->id);
763
764 #if defined(PROFILING)
765 /*
766 * Debugging tool: on raising an exception, show where we are.
767 * See also Exception.cmm:stg_raisezh.
768 * This wasn't done for asynchronous exceptions originally; see #1450
769 */
770 if (RtsFlags.ProfFlags.showCCSOnException)
771 {
772 fprintCCS_stderr(tso->prof.CCCS);
773 }
774 #endif
775
776 while (tso->what_next == ThreadRelocated) {
777 tso = tso->_link;
778 }
779
780 // mark it dirty; we're about to change its stack.
781 dirty_TSO(cap, tso);
782
783 sp = tso->sp;
784
785 // ASSUMES: the thread is not already complete or dead. Upper
786 // layers should deal with that.
787 ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);
788
789 if (stop_here != NULL) {
790 updatee = stop_here->updatee;
791 } else {
792 updatee = NULL;
793 }
794
795 // The stack freezing code assumes there's a closure pointer on
796 // the top of the stack, so we have to arrange that this is the case...
797 //
798 if (sp[0] == (W_)&stg_enter_info) {
799 sp++;
800 } else {
801 sp--;
802 sp[0] = (W_)&stg_dummy_ret_closure;
803 }
804
805 frame = sp + 1;
806 while (stop_here == NULL || frame < (StgPtr)stop_here) {
807
808 // 1. Let the top of the stack be the "current closure"
809 //
810 // 2. Walk up the stack until we find either an UPDATE_FRAME or a
811 // CATCH_FRAME.
812 //
813 // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
814 // current closure applied to the chunk of stack up to (but not
815 // including) the update frame. This closure becomes the "current
816 // closure". Go back to step 2.
817 //
818 // 4. If it's a CATCH_FRAME, then leave the exception handler on
819 // top of the stack applied to the exception.
820 //
821 // 5. If it's a STOP_FRAME, then kill the thread.
822 //
823 // NB: if we pass an ATOMICALLY_FRAME then abort the associated
824 // transaction
825
826 info = get_ret_itbl((StgClosure *)frame);
827
828 switch (info->i.type) {
829
830 case UPDATE_FRAME:
831 {
832 StgAP_STACK * ap;
833 nat words;
834
835 // First build an AP_STACK consisting of the stack chunk above the
836 // current update frame, with the top word on the stack as the
837 // fun field.
838 //
839 words = frame - sp - 1;
840 ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
841
842 ap->size = words;
843 ap->fun = (StgClosure *)sp[0];
844 sp++;
845 for(i=0; i < (nat)words; ++i) {
846 ap->payload[i] = (StgClosure *)*sp++;
847 }
848
849 SET_HDR(ap,&stg_AP_STACK_info,
850 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
851 TICK_ALLOC_UP_THK(words+1,0);
852
853 //IF_DEBUG(scheduler,
854 // debugBelch("sched: Updating ");
855 // printPtr((P_)((StgUpdateFrame *)frame)->updatee);
856 // debugBelch(" with ");
857 // printObj((StgClosure *)ap);
858 // );
859
860 if (((StgUpdateFrame *)frame)->updatee == updatee) {
861 // If this update frame points to the same closure as
862 // the update frame further down the stack
863 // (stop_here), then don't perform the update. We
864 // want to keep the blackhole in this case, so we can
865 // detect and report the loop (#2783).
866 ap = (StgAP_STACK*)updatee;
867 } else {
868 // Perform the update
869 // TODO: this may waste some work, if the thunk has
870 // already been updated by another thread.
871 UPD_IND(cap, ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
872 }
873
874 sp += sizeofW(StgUpdateFrame) - 1;
875 sp[0] = (W_)ap; // push onto stack
876 frame = sp + 1;
877 continue; //no need to bump frame
878 }
879
880 case STOP_FRAME:
881 {
882 // We've stripped the entire stack, the thread is now dead.
883 tso->what_next = ThreadKilled;
884 tso->sp = frame + sizeofW(StgStopFrame);
885 return;
886 }
887
888 case CATCH_FRAME:
889 // If we find a CATCH_FRAME, and we've got an exception to raise,
890 // then build the THUNK raise(exception), and leave it on
891 // top of the CATCH_FRAME ready to enter.
892 //
893 {
894 #ifdef PROFILING
895 StgCatchFrame *cf = (StgCatchFrame *)frame;
896 #endif
897 StgThunk *raise;
898
899 if (exception == NULL) break;
900
901 // we've got an exception to raise, so let's pass it to the
902 // handler in this frame.
903 //
904 raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
905 TICK_ALLOC_SE_THK(1,0);
906 SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
907 raise->payload[0] = exception;
908
909 // throw away the stack from Sp up to the CATCH_FRAME.
910 //
911 sp = frame - 1;
912
913 /* Ensure that async excpetions are blocked now, so we don't get
914 * a surprise exception before we get around to executing the
915 * handler.
916 */
917 tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
918
919 /* Put the newly-built THUNK on top of the stack, ready to execute
920 * when the thread restarts.
921 */
922 sp[0] = (W_)raise;
923 sp[-1] = (W_)&stg_enter_info;
924 tso->sp = sp-1;
925 tso->what_next = ThreadRunGHC;
926 IF_DEBUG(sanity, checkTSO(tso));
927 return;
928 }
929
930 case ATOMICALLY_FRAME:
931 if (stop_at_atomically) {
932 ASSERT(tso->trec->enclosing_trec == NO_TREC);
933 stmCondemnTransaction(cap, tso -> trec);
934 tso->sp = frame - 2;
935 // The ATOMICALLY_FRAME expects to be returned a
936 // result from the transaction, which it stores in the
937 // stack frame. Hence we arrange to return a dummy
938 // result, so that the GC doesn't get upset (#3578).
939 // Perhaps a better way would be to have a different
940 // ATOMICALLY_FRAME instance for condemned
941 // transactions, but I don't fully understand the
942 // interaction with STM invariants.
943 tso->sp[1] = (W_)&stg_NO_TREC_closure;
944 tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
945 tso->what_next = ThreadRunGHC;
946 return;
947 }
948 // Not stop_at_atomically... fall through and abort the
949 // transaction.
950
951 case CATCH_STM_FRAME:
952 case CATCH_RETRY_FRAME:
953 // IF we find an ATOMICALLY_FRAME then we abort the
954 // current transaction and propagate the exception. In
955 // this case (unlike ordinary exceptions) we do not care
956 // whether the transaction is valid or not because its
957 // possible validity cannot have caused the exception
958 // and will not be visible after the abort.
959
960 {
961 StgTRecHeader *trec = tso -> trec;
962 StgTRecHeader *outer = trec -> enclosing_trec;
963 debugTrace(DEBUG_stm,
964 "found atomically block delivering async exception");
965 stmAbortTransaction(cap, trec);
966 stmFreeAbortedTRec(cap, trec);
967 tso -> trec = outer;
968 break;
969 };
970
971 default:
972 break;
973 }
974
975 // move on to the next stack frame
976 frame += stack_frame_sizeW((StgClosure *)frame);
977 }
978
979 // if we got here, then we stopped at stop_here
980 ASSERT(stop_here != NULL);
981 }
982
983