Fix a couple of bugs in the throwTo handling, exposed by conc016(threaded2)
[ghc.git] / rts / RaiseAsync.c
1 /* ---------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2006
4 *
5 * Asynchronous exceptions
6 *
7 * --------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11
12 #include "sm/Storage.h"
13 #include "Threads.h"
14 #include "Trace.h"
15 #include "RaiseAsync.h"
16 #include "Schedule.h"
17 #include "Updates.h"
18 #include "STM.h"
19 #include "sm/Sanity.h"
20 #include "Profiling.h"
21 #if defined(mingw32_HOST_OS)
22 #include "win32/IOManager.h"
23 #endif
24
25 static void raiseAsync (Capability *cap,
26 StgTSO *tso,
27 StgClosure *exception,
28 rtsBool stop_at_atomically,
29 StgUpdateFrame *stop_here);
30
31 static void removeFromQueues(Capability *cap, StgTSO *tso);
32
33 static void blockedThrowTo (Capability *cap,
34 StgTSO *target, MessageThrowTo *msg);
35
36 static void throwToSendMsg (Capability *cap USED_IF_THREADS,
37 Capability *target_cap USED_IF_THREADS,
38 MessageThrowTo *msg USED_IF_THREADS);
39
40 static void performBlockedException (Capability *cap, MessageThrowTo *msg);
41
42 /* -----------------------------------------------------------------------------
43 throwToSingleThreaded
44
45 This version of throwTo is safe to use if and only if one of the
46 following holds:
47
48 - !THREADED_RTS
49
50 - all the other threads in the system are stopped (eg. during GC).
51
52 - we surely own the target TSO (eg. we just took it from the
53 run queue of the current capability, or we are running it).
54
55 It doesn't cater for blocking the source thread until the exception
56 has been raised.
57 -------------------------------------------------------------------------- */
58
59 void
60 throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
61 {
62 throwToSingleThreaded_(cap, tso, exception, rtsFalse);
63 }
64
/* Raise 'exception' asynchronously in 'tso'.  Safe only under the
 * ownership conditions listed in the comment above (single-threaded
 * RTS, all threads stopped, or we own the target TSO).  Does not block
 * the source thread.  If stop_at_atomically is true, stack stripping
 * stops at an ATOMICALLY_FRAME (see raiseAsync). */
void
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically)
{
    // Thread already dead?  Nothing to raise.
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }
    // Chase forwarding pointers to the current incarnation of the TSO.
    while (tso->what_next == ThreadRelocated) {
        tso = tso->_link;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
}
82
/* Suspend the computation in 'tso' by stripping its stack down to
 * 'stop_here' (an update frame), freezing the work done so far into
 * AP_STACK closures.  Implemented as raiseAsync with a NULL exception.
 * Same ownership preconditions as throwToSingleThreaded(). */
void
suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
    // Thread already dead?  Nothing to suspend.
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }
    // Chase forwarding pointers to the current incarnation of the TSO.
    while (tso->what_next == ThreadRelocated) {
        tso = tso->_link;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    // NULL exception == "suspend, don't kill"; stop at stop_here.
    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
}
99
100 /* -----------------------------------------------------------------------------
101 throwTo
102
103 This function may be used to throw an exception from one thread to
104 another, during the course of normal execution. This is a tricky
105 task: the target thread might be running on another CPU, or it
106 may be blocked and could be woken up at any point by another CPU.
107 We have some delicate synchronisation to do.
108
109 The underlying scheme when multiple Capabilities are in use is
110 message passing: when the target of a throwTo is on another
111 Capability, we send a message (a MessageThrowTo closure) to that
112 Capability.
113
114 If the throwTo needs to block because the target TSO is masking
115 exceptions (the TSO_BLOCKEX flag), then the message is placed on
116 the blocked_exceptions queue attached to the target TSO. When the
117 target TSO enters the unmasked state again, it must check the
118 queue. The blocked_exceptions queue is not locked; only the
119 Capability owning the TSO may modify it.
120
121 To make things simpler for throwTo, we always create the message
122 first before deciding what to do. The message may get sent, or it
123 may get attached to a TSO's blocked_exceptions queue, or the
124 exception may get thrown immediately and the message dropped,
125 depending on the current state of the target.
126
127 Currently we send a message if the target belongs to another
128 Capability, and it is
129
130 - NotBlocked, BlockedOnMsgWakeup, BlockedOnMsgThrowTo,
131 BlockedOnCCall
132
133 - or it is masking exceptions (TSO_BLOCKEX)
134
135 Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
136 BlockedOnBlackHole then we acquire ownership of the TSO by locking
137 its parent container (e.g. the MVar) and then raise the exception.
138 We might change these cases to be more message-passing-like in the
139 future.
140
141 Returns:
142
143 NULL exception was raised, ok to continue
144
145 MessageThrowTo * exception was not raised; the source TSO
146 should now put itself in the state
147 BlockedOnMsgThrowTo, and when it is ready
148 it should unlock the mssage using
149 unlockClosure(msg, &stg_MSG_THROWTO_info);
150 If it decides not to raise the exception after
151 all, it can revoke it safely with
152 unlockClosure(msg, &stg_IND_info);
153
154 -------------------------------------------------------------------------- */
155
156 MessageThrowTo *
157 throwTo (Capability *cap, // the Capability we hold
158 StgTSO *source, // the TSO sending the exception (or NULL)
159 StgTSO *target, // the TSO receiving the exception
160 StgClosure *exception) // the exception closure
161 {
162 MessageThrowTo *msg;
163
164 msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
165 // message starts locked; the caller has to unlock it when it is
166 // ready.
167 msg->header.info = &stg_WHITEHOLE_info;
168 msg->source = source;
169 msg->target = target;
170 msg->exception = exception;
171
172 switch (throwToMsg(cap, msg))
173 {
174 case THROWTO_SUCCESS:
175 return NULL;
176 case THROWTO_BLOCKED:
177 default:
178 return msg;
179 }
180 }
181
182
183 nat
184 throwToMsg (Capability *cap, MessageThrowTo *msg)
185 {
186 StgWord status;
187 StgTSO *target = msg->target;
188
189 ASSERT(target != END_TSO_QUEUE);
190
191 // follow ThreadRelocated links in the target first
192 while (target->what_next == ThreadRelocated) {
193 target = target->_link;
194 // No, it might be a WHITEHOLE:
195 // ASSERT(get_itbl(target)->type == TSO);
196 }
197
198 debugTraceCap(DEBUG_sched, cap,
199 "throwTo: from thread %lu to thread %lu",
200 (unsigned long)msg->source->id,
201 (unsigned long)msg->target->id);
202
203 #ifdef DEBUG
204 traceThreadStatus(DEBUG_sched, target);
205 #endif
206
207 goto check_target;
208 retry:
209 write_barrier();
210 debugTrace(DEBUG_sched, "throwTo: retrying...");
211
212 check_target:
213 ASSERT(target != END_TSO_QUEUE);
214
215 // Thread already dead?
216 if (target->what_next == ThreadComplete
217 || target->what_next == ThreadKilled) {
218 return THROWTO_SUCCESS;
219 }
220
221 status = target->why_blocked;
222
223 switch (status) {
224 case NotBlocked:
225 case BlockedOnMsgWakeup:
226 /* if status==NotBlocked, and target->cap == cap, then
227 we own this TSO and can raise the exception.
228
229 How do we establish this condition? Very carefully.
230
231 Let
232 P = (status == NotBlocked)
233 Q = (tso->cap == cap)
234
235 Now, if P & Q are true, then the TSO is locked and owned by
236 this capability. No other OS thread can steal it.
237
238 If P==0 and Q==1: the TSO is blocked, but attached to this
239 capabilty, and it can be stolen by another capability.
240
241 If P==1 and Q==0: the TSO is runnable on another
242 capability. At any time, the TSO may change from runnable
243 to blocked and vice versa, while it remains owned by
244 another capability.
245
246 Suppose we test like this:
247
248 p = P
249 q = Q
250 if (p && q) ...
251
252 this is defeated by another capability stealing a blocked
253 TSO from us to wake it up (Schedule.c:unblockOne()). The
254 other thread is doing
255
256 Q = 0
257 P = 1
258
259 assuming arbitrary reordering, we could see this
260 interleaving:
261
262 start: P==0 && Q==1
263 P = 1
264 p = P
265 q = Q
266 Q = 0
267 if (p && q) ...
268
269 so we need a memory barrier:
270
271 p = P
272 mb()
273 q = Q
274 if (p && q) ...
275
276 this avoids the problematic case. There are other cases
277 to consider, but this is the tricky one.
278
279 Note that we must be sure that unblockOne() does the
280 writes in the correct order: Q before P. The memory
281 barrier ensures that if we have seen the write to P, we
282 have also seen the write to Q.
283 */
284 {
285 Capability *target_cap;
286
287 write_barrier();
288 target_cap = target->cap;
289 if (target_cap != cap) {
290 throwToSendMsg(cap, target_cap, msg);
291 return THROWTO_BLOCKED;
292 } else {
293 if ((target->flags & TSO_BLOCKEX) == 0) {
294 // It's on our run queue and not blocking exceptions
295 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
296 return THROWTO_SUCCESS;
297 } else {
298 blockedThrowTo(cap,target,msg);
299 return THROWTO_BLOCKED;
300 }
301 }
302 }
303
304 case BlockedOnMsgThrowTo:
305 {
306 Capability *target_cap;
307 const StgInfoTable *i;
308 MessageThrowTo *m;
309
310 m = target->block_info.throwto;
311
312 // target is local to this cap, but has sent a throwto
313 // message to another cap.
314 //
315 // The source message is locked. We need to revoke the
316 // target's message so that we can raise the exception, so
317 // we attempt to lock it.
318
319 // There's a possibility of a deadlock if two threads are both
320 // trying to throwTo each other (or more generally, a cycle of
321 // threads). To break the symmetry we compare the addresses
322 // of the MessageThrowTo objects, and the one for which m <
323 // msg gets to spin, while the other can only try to lock
324 // once, but must then back off and unlock both before trying
325 // again.
326 if (m < msg) {
327 i = lockClosure((StgClosure *)m);
328 } else {
329 i = tryLockClosure((StgClosure *)m);
330 if (i == NULL) {
331 // debugBelch("collision\n");
332 throwToSendMsg(cap, target->cap, msg);
333 return THROWTO_BLOCKED;
334 }
335 }
336
337 if (i != &stg_MSG_THROWTO_info) {
338 // if it's an IND, this TSO has been woken up by another Cap
339 unlockClosure((StgClosure*)m, i);
340 goto retry;
341 }
342
343 target_cap = target->cap;
344 if (target_cap != cap) {
345 unlockClosure((StgClosure*)m, i);
346 throwToSendMsg(cap, target_cap, msg);
347 return THROWTO_BLOCKED;
348 }
349
350 if ((target->flags & TSO_BLOCKEX) &&
351 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
352 unlockClosure((StgClosure*)m, i);
353 blockedThrowTo(cap,target,msg);
354 return THROWTO_BLOCKED;
355 }
356
357 // nobody else can wake up this TSO after we claim the message
358 unlockClosure((StgClosure*)m, &stg_IND_info);
359
360 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
361 unblockOne(cap, target);
362 return THROWTO_SUCCESS;
363 }
364
365 case BlockedOnMVar:
366 {
367 /*
368 To establish ownership of this TSO, we need to acquire a
369 lock on the MVar that it is blocked on.
370 */
371 StgMVar *mvar;
372 StgInfoTable *info USED_IF_THREADS;
373
374 mvar = (StgMVar *)target->block_info.closure;
375
376 // ASSUMPTION: tso->block_info must always point to a
377 // closure. In the threaded RTS it does.
378 switch (get_itbl(mvar)->type) {
379 case MVAR_CLEAN:
380 case MVAR_DIRTY:
381 break;
382 default:
383 goto retry;
384 }
385
386 info = lockClosure((StgClosure *)mvar);
387
388 if (target->what_next == ThreadRelocated) {
389 target = target->_link;
390 unlockClosure((StgClosure *)mvar,info);
391 goto retry;
392 }
393 // we have the MVar, let's check whether the thread
394 // is still blocked on the same MVar.
395 if (target->why_blocked != BlockedOnMVar
396 || (StgMVar *)target->block_info.closure != mvar) {
397 unlockClosure((StgClosure *)mvar, info);
398 goto retry;
399 }
400
401 if ((target->flags & TSO_BLOCKEX) &&
402 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
403 Capability *target_cap = target->cap;
404 if (target->cap != cap) {
405 throwToSendMsg(cap,target_cap,msg);
406 } else {
407 blockedThrowTo(cap,target,msg);
408 }
409 unlockClosure((StgClosure *)mvar, info);
410 return THROWTO_BLOCKED;
411 } else {
412 removeThreadFromMVarQueue(cap, mvar, target);
413 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
414 unblockOne(cap, target);
415 unlockClosure((StgClosure *)mvar, info);
416 return THROWTO_SUCCESS;
417 }
418 }
419
420 case BlockedOnBlackHole:
421 {
422 ACQUIRE_LOCK(&sched_mutex);
423 // double checking the status after the memory barrier:
424 if (target->why_blocked != BlockedOnBlackHole) {
425 RELEASE_LOCK(&sched_mutex);
426 goto retry;
427 }
428
429 if (target->flags & TSO_BLOCKEX) {
430 Capability *target_cap = target->cap;
431 if (target->cap != cap) {
432 throwToSendMsg(cap,target_cap,msg);
433 } else {
434 blockedThrowTo(cap,target,msg);
435 }
436 RELEASE_LOCK(&sched_mutex);
437 return THROWTO_BLOCKED; // caller releases lock
438 } else {
439 removeThreadFromQueue(cap, &blackhole_queue, target);
440 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
441 unblockOne(cap, target);
442 RELEASE_LOCK(&sched_mutex);
443 return THROWTO_SUCCESS;
444 }
445 }
446
447 case BlockedOnSTM:
448 lockTSO(target);
449 // Unblocking BlockedOnSTM threads requires the TSO to be
450 // locked; see STM.c:unpark_tso().
451 if (target->why_blocked != BlockedOnSTM) {
452 unlockTSO(target);
453 goto retry;
454 }
455 if ((target->flags & TSO_BLOCKEX) &&
456 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
457 Capability *target_cap = target->cap;
458 if (target->cap != cap) {
459 throwToSendMsg(cap,target_cap,msg);
460 } else {
461 blockedThrowTo(cap,target,msg);
462 }
463 unlockTSO(target);
464 return THROWTO_BLOCKED;
465 } else {
466 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
467 unblockOne(cap, target);
468 unlockTSO(target);
469 return THROWTO_SUCCESS;
470 }
471
472 case BlockedOnCCall:
473 case BlockedOnCCall_NoUnblockExc:
474 {
475 Capability *target_cap;
476
477 target_cap = target->cap;
478 if (target_cap != cap) {
479 throwToSendMsg(cap, target_cap, msg);
480 return THROWTO_BLOCKED;
481 }
482
483 blockedThrowTo(cap,target,msg);
484 return THROWTO_BLOCKED;
485 }
486
487 #ifndef THREADEDED_RTS
488 case BlockedOnRead:
489 case BlockedOnWrite:
490 case BlockedOnDelay:
491 #if defined(mingw32_HOST_OS)
492 case BlockedOnDoProc:
493 #endif
494 if ((target->flags & TSO_BLOCKEX) &&
495 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
496 blockedThrowTo(cap,target,msg);
497 return THROWTO_BLOCKED;
498 } else {
499 removeFromQueues(cap,target);
500 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
501 return THROWTO_SUCCESS;
502 }
503 #endif
504
505 default:
506 barf("throwTo: unrecognised why_blocked value");
507 }
508 barf("throwTo");
509 }
510
/* Post 'msg' to another Capability's inbox.  In the non-threaded RTS
 * there is only one Capability, so message sending cannot happen and
 * this compiles to a no-op (hence the unused-parameter annotations). */
static void
throwToSendMsg (Capability *cap STG_UNUSED,
                Capability *target_cap USED_IF_THREADS,
                MessageThrowTo *msg USED_IF_THREADS)

{
#ifdef THREADED_RTS
    debugTrace(DEBUG_sched, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);

    sendMessage(target_cap, (Message*)msg);
#endif
}
523
// Block a throwTo message on the target TSO's blocked_exceptions
// queue.  The current Capability must own the target TSO in order to
// modify the blocked_exceptions queue.
static void
blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
{
    debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
                  (unsigned long)target->id);

    // the blocked_exceptions queue is unlocked: only the owning
    // Capability may touch it (see the comment at the top of the file)
    ASSERT(target->cap == cap);

    // push the message onto the front of the queue
    msg->link = (Message*)target->blocked_exceptions;
    target->blocked_exceptions = msg;
    dirty_TSO(cap,target); // we modified the blocked_exceptions queue
}
539
540 /* -----------------------------------------------------------------------------
541 Waking up threads blocked in throwTo
542
543 There are two ways to do this: maybePerformBlockedException() will
544 perform the throwTo() for the thread at the head of the queue
545 immediately, and leave the other threads on the queue.
546 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
547 before raising an exception.
548
549 awakenBlockedExceptionQueue() will wake up all the threads in the
550 queue, but not perform any throwTo() immediately. This might be
551 more appropriate when the target thread is the one actually running
552 (see Exception.cmm).
553
554 Returns: non-zero if an exception was raised, zero otherwise.
555 -------------------------------------------------------------------------- */
556
557 int
558 maybePerformBlockedException (Capability *cap, StgTSO *tso)
559 {
560 MessageThrowTo *msg;
561 const StgInfoTable *i;
562
563 if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
564 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
565 awakenBlockedExceptionQueue(cap,tso);
566 return 1;
567 } else {
568 return 0;
569 }
570 }
571
572 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
573 (tso->flags & TSO_BLOCKEX) != 0) {
574 debugTrace(DEBUG_sched, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
575 }
576
577 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
578 && ((tso->flags & TSO_BLOCKEX) == 0
579 || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {
580
581 // We unblock just the first thread on the queue, and perform
582 // its throw immediately.
583 loop:
584 msg = tso->blocked_exceptions;
585 if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
586 i = lockClosure((StgClosure*)msg);
587 tso->blocked_exceptions = (MessageThrowTo*)msg->link;
588 if (i == &stg_IND_info) {
589 unlockClosure((StgClosure*)msg,i);
590 goto loop;
591 }
592
593 performBlockedException(cap, msg);
594 unblockOne_(cap, msg->source, rtsFalse/*no migrate*/);
595 unlockClosure((StgClosure*)msg,&stg_IND_info);
596 return 1;
597 }
598 return 0;
599 }
600
// awakenBlockedExceptionQueue(): Just wake up the whole queue of
// blocked exceptions and let them try again.
void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;

    for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
         msg = (MessageThrowTo*)msg->link) {
        // lock each message; if it is already an IND the sender
        // revoked it and there is no thread to wake
        i = lockClosure((StgClosure *)msg);
        if (i != &stg_IND_info) {
            unblockOne_(cap, msg->source, rtsFalse/*no migrate*/);
        }
        // restore the original info pointer: the message itself is
        // left intact for the woken sender to deal with
        unlockClosure((StgClosure *)msg,i);
    }
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
}
620
/* Raise the exception carried by 'msg' in its target thread, on behalf
 * of the source thread that blocked sending it.  Caller must own the
 * target TSO and hold the (locked) message.  Also pops the
 * stg_block_throwto frame from the source's stack so it resumes
 * normally. */
static void
performBlockedException (Capability *cap, MessageThrowTo *msg)
{
    StgTSO *source;

    source = msg->source;

    // the source must still be parked on exactly this message, with a
    // stg_block_throwto frame on top of its stack (see Exception.cmm)
    ASSERT(source->why_blocked == BlockedOnMsgThrowTo);
    ASSERT(source->block_info.closure == (StgClosure *)msg);
    ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
    ASSERT(((StgTSO *)source->sp[1])->id == msg->target->id);
    // check ids not pointers, because the thread might be relocated

    throwToSingleThreaded(cap, msg->target, msg->exception);
    // pop the 3-word stg_block_throwto frame from the source's stack
    source->sp += 3;
}
637
638 /* -----------------------------------------------------------------------------
639 Remove a thread from blocking queues.
640
641 This is for use when we raise an exception in another thread, which
642 may be blocked.
643
644 Precondition: we have exclusive access to the TSO, via the same set
645 of conditions as throwToSingleThreaded() (c.f.).
646 -------------------------------------------------------------------------- */
647
/* Detach 'tso' from whatever blocking queue or message it is attached
 * to, then (for most cases) mark it runnable via unblockOne().  See
 * the precondition comment above: we must have exclusive access to
 * the TSO. */
static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
        // nothing to detach from, and it is already runnable
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here!  We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack.  We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
        removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
        goto done;

    case BlockedOnBlackHole:
        removeThreadFromQueue(cap, &blackhole_queue, tso);
        goto done;

    case BlockedOnMsgWakeup:
    {
        // kill the message, atomically:
        tso->block_info.wakeup->header.info = &stg_IND_info;
        break;
    }

    case BlockedOnMsgThrowTo:
    {
        MessageThrowTo *m = tso->block_info.throwto;
        // The message is locked by us, unless we got here via
        // deleteAllThreads(), in which case we own all the
        // capabilities.
        // ASSERT(m->header.info == &stg_WHITEHOLE_info);

        // unlock and revoke it at the same time
        unlockClosure((StgClosure*)m,&stg_IND_info);
        break;
    }

#if !defined(THREADED_RTS)
    // I/O blocking states only exist in the non-threaded RTS
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(cap, &sleeping_queue, tso);
        goto done;
#endif

    default:
        barf("removeFromQueues: %d", tso->why_blocked);
    }

done:
    // mark the thread runnable (resets why_blocked, puts it on the
    // run queue)
    unblockOne(cap, tso);
}
720
721 /* -----------------------------------------------------------------------------
722 * raiseAsync()
723 *
724 * The following function implements the magic for raising an
725 * asynchronous exception in an existing thread.
726 *
727 * We first remove the thread from any queue on which it might be
728 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
729 *
730 * We strip the stack down to the innermost CATCH_FRAME, building
731 * thunks in the heap for all the active computations, so they can
732 * be restarted if necessary. When we reach a CATCH_FRAME, we build
733 * an application of the handler to the exception, and push it on
734 * the top of the stack.
735 *
736 * How exactly do we save all the active computations? We create an
737 * AP_STACK for every UpdateFrame on the stack. Entering one of these
738 * AP_STACKs pushes everything from the corresponding update frame
739 * upwards onto the stack. (Actually, it pushes everything up to the
740 * next update frame plus a pointer to the next AP_STACK object.
741 * Entering the next AP_STACK object pushes more onto the stack until we
742 * reach the last AP_STACK object - at which point the stack should look
743 * exactly as it did when we killed the TSO and we can continue
744 * execution by entering the closure on top of the stack.
745 *
746 * We can also kill a thread entirely - this happens if either (a) the
747 * exception passed to raiseAsync is NULL, or (b) there's no
748 * CATCH_FRAME on the stack. In either case, we strip the entire
749 * stack and replace the thread with a zombie.
750 *
751 * ToDo: in THREADED_RTS mode, this function is only safe if either
752 * (a) we hold all the Capabilities (eg. in GC, or if there is only
753 * one Capability), or (b) we own the Capability that the TSO is
754 * currently blocked on or on the run queue of.
755 *
756 * -------------------------------------------------------------------------- */
757
/* Strip tso's stack down to the innermost CATCH_FRAME (or to
 * stop_here, or to the bottom), freezing the suspended computation
 * into AP_STACK thunks as we go.  See the long comment above for the
 * full story.  If 'exception' is NULL we are suspending, not raising;
 * if there is no handler, the thread is killed. */
static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    StgClosure *updatee;
    nat i;

    debugTrace(DEBUG_sched,
               "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:stg_raisezh.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
    {
        fprintCCS_stderr(tso->prof.CCCS);
    }
#endif
    // ASSUMES: the thread is not already complete or dead, or
    // ThreadRelocated.  Upper layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete &&
           tso->what_next != ThreadKilled &&
           tso->what_next != ThreadRelocated);

    // mark it dirty; we're about to change its stack.
    dirty_TSO(cap, tso);

    sp = tso->sp;

    // if stopping at an update frame, remember what it updates so we
    // can recognise other frames updating the same closure (#2783)
    if (stop_here != NULL) {
        updatee = stop_here->updatee;
    } else {
        updatee = NULL;
    }

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        // "enter closure" frame: the closure itself is the next word
        sp++;
    } else {
        // otherwise push a dummy closure to stand in
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //    CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame.  This closure becomes the "current
        //    closure".  Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun  = (StgClosure *)sp[0];
            sp++;
            // copy the stack chunk into the AP_STACK's payload
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //         );

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update.  We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            } else {
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                UPD_IND(cap, ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
            }

            // replace the update frame with the AP_STACK, which
            // becomes the new "current closure"
            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; //no need to bump frame
        }

        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);
            return;
        }

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
#ifdef PROFILING
            StgCatchFrame *cf = (StgCatchFrame *)frame;
#endif
            StgThunk *raise;

            // NULL exception means suspendComputation(): keep walking
            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async excpetions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            tso->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));
            return;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                // only the outermost transaction can be condemned
                ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
                tso->sp = frame - 2;
                // The ATOMICALLY_FRAME expects to be returned a
                // result from the transaction, which it stores in the
                // stack frame.  Hence we arrange to return a dummy
                // result, so that the GC doesn't get upset (#3578).
                // Perhaps a better way would be to have a different
                // ATOMICALLY_FRAME instance for condemned
                // transactions, but I don't fully understand the
                // interaction with STM invariants.
                tso->sp[1] = (W_)&stg_NO_TREC_closure;
                tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
                tso->what_next = ThreadRunGHC;
                return;
            }
            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // IF we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception.  In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.

        {
            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = trec -> enclosing_trec;
            debugTrace(DEBUG_stm,
                       "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);
            tso -> trec = outer;
            break;
        };

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);
}
985
986