rts/RaiseAsync.c
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Asynchronous exceptions
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "sm/Storage.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
#include "Schedule.h"
#include "Updates.h"
#include "STM.h"
#include "sm/Sanity.h"
#include "Profiling.h"
#include "Messages.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif

static void raiseAsync (Capability *cap,
                        StgTSO *tso,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgUpdateFrame *stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void removeFromMVarBlockedQueue (StgTSO *tso);

static void blockedThrowTo (Capability *cap,
                            StgTSO *target, MessageThrowTo *msg);

static void throwToSendMsg (Capability *cap USED_IF_THREADS,
                            Capability *target_cap USED_IF_THREADS,
                            MessageThrowTo *msg USED_IF_THREADS);

/* -----------------------------------------------------------------------------
   throwToSingleThreaded

   This version of throwTo is safe to use if and only if one of the
   following holds:

   - !THREADED_RTS

   - all the other threads in the system are stopped (eg. during GC).

   - we surely own the target TSO (eg. we just took it from the
     run queue of the current capability, or we are running it).

   It doesn't cater for blocking the source thread until the exception
   has been raised.
   -------------------------------------------------------------------------- */

void
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
{
    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
}

void
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically)
{
    tso = deRefTSO(tso);

    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
}
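
/* A minimal usage sketch (illustrative only, not a definitive caller):
 * throwToSingleThreaded() is safe on a TSO we own, e.g. one we just
 * took from our own run queue.  The helper below is hypothetical:
 *
 *   static void cancelLocalThread (Capability *cap, StgTSO *tso)
 *   {
 *       // safe: tso came from cap's run queue, so we own it
 *       throwToSingleThreaded(cap, tso, NULL);  // NULL = just kill it
 *   }
 *
 * Passing a NULL exception strips the whole stack without running any
 * handlers, leaving a zombie thread (see raiseAsync() below).
 */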

void
suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
    tso = deRefTSO(tso);

    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
}

/* -----------------------------------------------------------------------------
   throwTo

   This function may be used to throw an exception from one thread to
   another, during the course of normal execution.  This is a tricky
   task: the target thread might be running on another CPU, or it
   may be blocked and could be woken up at any point by another CPU.
   We have some delicate synchronisation to do.

   The underlying scheme when multiple Capabilities are in use is
   message passing: when the target of a throwTo is on another
   Capability, we send a message (a MessageThrowTo closure) to that
   Capability.

   If the throwTo needs to block because the target TSO is masking
   exceptions (the TSO_BLOCKEX flag), then the message is placed on
   the blocked_exceptions queue attached to the target TSO.  When the
   target TSO enters the unmasked state again, it must check the
   queue.  The blocked_exceptions queue is not locked; only the
   Capability owning the TSO may modify it.

   To make things simpler for throwTo, we always create the message
   first before deciding what to do.  The message may get sent, or it
   may get attached to a TSO's blocked_exceptions queue, or the
   exception may get thrown immediately and the message dropped,
   depending on the current state of the target.

   Currently we send a message if the target belongs to another
   Capability, and it is

   - NotBlocked, BlockedOnMsgThrowTo,
     BlockedOnCCall_Interruptible

   - or it is masking exceptions (TSO_BLOCKEX)

   Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
   BlockedOnBlackHole then we acquire ownership of the TSO by locking
   its parent container (e.g. the MVar) and then raise the exception.
   We might change these cases to be more message-passing-like in the
   future.

   Returns:

   NULL               exception was raised, ok to continue

   MessageThrowTo *   exception was not raised; the source TSO
                      should now put itself in the state
                      BlockedOnMsgThrowTo, and when it is ready
                      it should unlock the message using
                      unlockClosure(msg, &stg_MSG_THROWTO_info);
                      If it decides not to raise the exception after
                      all, it can revoke it safely with
                      unlockClosure(msg, &stg_MSG_NULL_info);

   -------------------------------------------------------------------------- */

MessageThrowTo *
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception (or NULL)
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception) // the exception closure
{
    MessageThrowTo *msg;

    msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
    // message starts locked; the caller has to unlock it when it is
    // ready.
    SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
    msg->source    = source;
    msg->target    = target;
    msg->exception = exception;

    switch (throwToMsg(cap, msg))
    {
    case THROWTO_SUCCESS:
        return NULL;
    case THROWTO_BLOCKED:
    default:
        return msg;
    }
}
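
/* A sketch of the caller-side protocol implied by the Returns section
 * above (schematic only; a real caller such as stg_killThreadzh in
 * Exception.cmm also saves the thread's stack and returns to the
 * scheduler, which is omitted here):
 *
 *   MessageThrowTo *msg = throwTo(cap, source, target, exception);
 *   if (msg != NULL) {
 *       // exception not raised yet: park the source thread
 *       source->why_blocked = BlockedOnMsgThrowTo;
 *       source->block_info.throwto = msg;
 *       // once parked, publish the message so the target can see it
 *       unlockClosure((StgClosure*)msg, &stg_MSG_THROWTO_info);
 *   }
 *
 * To give up instead of blocking, the caller revokes the message with
 * unlockClosure((StgClosure*)msg, &stg_MSG_NULL_info).
 */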

nat
throwToMsg (Capability *cap, MessageThrowTo *msg)
{
    StgWord status;
    StgTSO *target = msg->target;
    Capability *target_cap;

    goto check_target;

retry:
    write_barrier();
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    ASSERT(target != END_TSO_QUEUE);

    // follow ThreadRelocated links in the target first
    target = deRefTSO(target);

    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "throwTo: from thread %lu to thread %lu",
                  (unsigned long)msg->source->id,
                  (unsigned long)msg->target->id);

#ifdef DEBUG
    traceThreadStatus(DEBUG_sched, target);
#endif

    target_cap = target->cap;
    if (target->cap != cap) {
        throwToSendMsg(cap, target_cap, msg);
        return THROWTO_BLOCKED;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
    {
        if ((target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;
        MessageThrowTo *m;

        m = target->block_info.throwto;

        // target is local to this cap, but has sent a throwto
        // message to another cap.
        //
        // The source message is locked.  We need to revoke the
        // target's message so that we can raise the exception, so
        // we attempt to lock it.

        // There's a possibility of a deadlock if two threads are both
        // trying to throwTo each other (or more generally, a cycle of
        // threads).  To break the symmetry we compare the addresses
        // of the MessageThrowTo objects, and the one for which m <
        // msg gets to spin, while the other can only try to lock
        // once, but must then back off and unlock both before trying
        // again.
        if (m < msg) {
            i = lockClosure((StgClosure *)m);
        } else {
            i = tryLockClosure((StgClosure *)m);
            if (i == NULL) {
                // debugBelch("collision\n");
                throwToSendMsg(cap, target->cap, msg);
                return THROWTO_BLOCKED;
            }
        }

        if (i == &stg_MSG_NULL_info) {
            // we know there's a MSG_TRY_WAKEUP on the way, so we
            // might as well just do it now.  The message will
            // be a no-op when it arrives.
            unlockClosure((StgClosure*)m, i);
            tryWakeupThread_(cap, target);
            goto retry;
        }

        if (i != &stg_MSG_THROWTO_info) {
            // not a MSG_THROWTO after all: the message has already
            // been revoked or handled by another Capability, so retry
            unlockClosure((StgClosure*)m, i);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            unlockClosure((StgClosure*)m, i);
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }

        // nobody else can wake up this TSO after we claim the message
        unlockClosure((StgClosure*)m, &stg_MSG_NULL_info);

        raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
        return THROWTO_SUCCESS;
    }

    case BlockedOnMVar:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure.  In the threaded RTS it does.
        switch (get_itbl(mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockClosure((StgClosure *)mvar,info);
            goto retry;
        }
        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if (target->why_blocked != BlockedOnMVar
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if (target->_link == END_TSO_QUEUE) {
            // the MVar operation has already completed.  There is a
            // MSG_TRY_WAKEUP on the way, but we can just wake up the
            // thread now anyway and ignore the message when it
            // arrives.
            unlockClosure((StgClosure *)mvar, info);
            tryWakeupThread_(cap, target);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_BLOCKED;
        } else {
            // revoke the MVar operation
            removeFromMVarBlockedQueue(target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        if (target->flags & TSO_BLOCKEX) {
            // BlockedOnBlackHole is not interruptible.
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            // Revoke the message by replacing it with IND.  We're not
            // locking anything here, so we might still get a TRY_WAKEUP
            // message from the owner of the blackhole some time in the
            // future, but that doesn't matter.
            ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
            OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockTSO(target);
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall_Interruptible:
#ifdef THREADED_RTS
    {
        Task *task = NULL;
        // walk suspended_ccalls to find the correct worker thread
        InCall *incall;
        for (incall = cap->suspended_ccalls; incall != NULL; incall = incall->next) {
            if (incall->suspended_tso == target) {
                task = incall->task;
                break;
            }
        }
        if (task != NULL) {
            blockedThrowTo(cap, target, msg);
            if (!((target->flags & TSO_BLOCKEX) &&
                  ((target->flags & TSO_INTERRUPTIBLE) == 0))) {
                interruptWorkerTask(task);
            }
            return THROWTO_BLOCKED;
        } else {
            debugTraceCap(DEBUG_sched, cap, "throwTo: could not find worker thread to kill");
        }
        // fall through to the BlockedOnCCall case below
    }
#endif
    case BlockedOnCCall:
        blockedThrowTo(cap,target,msg);
        return THROWTO_BLOCKED;

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    default:
        barf("throwTo: unrecognised why_blocked value");
    }
    barf("throwTo");
}
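
/* A note on the masking tests above (a summary of the logic, not new
 * policy): the recurring condition
 *
 *     (target->flags & TSO_BLOCKEX) &&
 *     ((target->flags & TSO_INTERRUPTIBLE) == 0)
 *
 * asks "is the target masked, and uninterruptibly so?".  Roughly:
 *
 *     flags                              throwTo to a blocked target
 *     ---------------------------------  ----------------------------
 *     (neither flag)                     raises immediately
 *     TSO_BLOCKEX | TSO_INTERRUPTIBLE    raises if the blocking
 *                                        operation is interruptible
 *     TSO_BLOCKEX only                   always blocks the thrower
 */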

static void
throwToSendMsg (Capability *cap STG_UNUSED,
                Capability *target_cap USED_IF_THREADS,
                MessageThrowTo *msg USED_IF_THREADS)
{
#ifdef THREADED_RTS
    debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);

    sendMessage(cap, target_cap, (Message*)msg);
#endif
}

// Block a throwTo message on the target TSO's blocked_exceptions
// queue.  The current Capability must own the target TSO in order to
// modify the blocked_exceptions queue.
static void
blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
{
    debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
                  (unsigned long)target->id);

    ASSERT(target->cap == cap);

    msg->link = target->blocked_exceptions;
    target->blocked_exceptions = msg;
    dirty_TSO(cap,target); // we modified the blocked_exceptions queue
}

/* -----------------------------------------------------------------------------
   Waking up threads blocked in throwTo

   There are two ways to do this: maybePerformBlockedException() will
   perform the throwTo() for the thread at the head of the queue
   immediately, and leave the other threads on the queue.
   maybePerformBlockedException() also checks the TSO_BLOCKEX flag
   before raising an exception.

   awakenBlockedExceptionQueue() will wake up all the threads in the
   queue, but not perform any throwTo() immediately.  This might be
   more appropriate when the target thread is the one actually running
   (see Exception.cmm).

   Returns: non-zero if an exception was raised, zero otherwise.
   -------------------------------------------------------------------------- */

int
maybePerformBlockedException (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;

    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);
            return 1;
        } else {
            return 0;
        }
    }

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
    }

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
    loop:
        msg = tso->blocked_exceptions;
        if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
        i = lockClosure((StgClosure*)msg);
        tso->blocked_exceptions = (MessageThrowTo*)msg->link;
        if (i == &stg_MSG_NULL_info) {
            unlockClosure((StgClosure*)msg,i);
            goto loop;
        }

        throwToSingleThreaded(cap, msg->target, msg->exception);
        unlockClosure((StgClosure*)msg,&stg_MSG_NULL_info);
        tryWakeupThread(cap, msg->source);
        return 1;
    }
    return 0;
}
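
/* A schematic of how a caller might use maybePerformBlockedException()
 * (illustrative only; the real call sites live in the scheduler and in
 * the unmasking code in Exception.cmm):
 *
 *   // hypothetical: 'tso' is about to run on this capability
 *   if (maybePerformBlockedException(cap, tso)) {
 *       // a pending exception was raised on tso; its stack has
 *       // already been rewritten by raiseAsync(), so just run it
 *   }
 */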

// awakenBlockedExceptionQueue(): Just wake up the whole queue of
// blocked exceptions.

void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;

    for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
         msg = (MessageThrowTo*)msg->link) {
        i = lockClosure((StgClosure *)msg);
        if (i != &stg_MSG_NULL_info) {
            unlockClosure((StgClosure *)msg,&stg_MSG_NULL_info);
            tryWakeupThread(cap, msg->source);
        } else {
            unlockClosure((StgClosure *)msg,i);
        }
    }
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
}

/* -----------------------------------------------------------------------------
   Remove a thread from blocking queues.

   This is for use when we raise an exception in another thread, which
   may be blocked.

   Precondition: we have exclusive access to the TSO, via the same set
   of conditions as throwToSingleThreaded() (c.f.).
   -------------------------------------------------------------------------- */

static void
removeFromMVarBlockedQueue (StgTSO *tso)
{
    StgMVar *mvar = (StgMVar*)tso->block_info.closure;
    StgMVarTSOQueue *q = (StgMVarTSOQueue*)tso->_link;

    if (q == (StgMVarTSOQueue*)END_TSO_QUEUE) {
        // already removed from this MVar
        return;
    }

    // Assume the MVar is locked. (not assertable; sometimes it isn't
    // actually WHITEHOLE'd).

    // We want to remove the MVAR_TSO_QUEUE object from the queue.  It
    // isn't doubly-linked so we can't actually remove it; instead we
    // just overwrite it with an IND if possible and let the GC short
    // it out.  However, we have to be careful to maintain the deque
    // structure:

    if (mvar->head == q) {
        mvar->head = q->link;
        q->header.info = &stg_IND_info;
        if (mvar->tail == q) {
            mvar->tail = (StgMVarTSOQueue*)END_TSO_QUEUE;
        }
    }
    else if (mvar->tail == q) {
        // we can't replace it with an IND in this case, because then
        // we lose the tail pointer when the GC shorts out the IND.
        // So we use MSG_NULL as a kind of non-dupable indirection;
        // these are ignored by takeMVar/putMVar.
        q->header.info = &stg_MSG_NULL_info;
    }
    else {
        q->header.info = &stg_IND_info;
    }

    // revoke the MVar operation
    tso->_link = END_TSO_QUEUE;
}
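
/* For example (a sketch of the three cases above, where 'q' is the
 * queue entry being removed):
 *
 *   head -> [q] -> [b] -> [c] <- tail    head moves on; q becomes IND
 *   head -> [a] -> [b] -> [q] <- tail    q becomes MSG_NULL, so the
 *                                        tail pointer stays valid
 *   head -> [a] -> [q] -> [c] <- tail    q becomes IND; the GC will
 *                                        short it out of the chain
 */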

static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
    case ThreadMigrating:
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here!  We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack.  We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
        removeFromMVarBlockedQueue(tso);
        goto done;

    case BlockedOnBlackHole:
        // nothing to do
        goto done;

    case BlockedOnMsgThrowTo:
    {
        MessageThrowTo *m = tso->block_info.throwto;
        // The message is locked by us, unless we got here via
        // deleteAllThreads(), in which case we own all the
        // capabilities.
        // ASSERT(m->header.info == &stg_WHITEHOLE_info);

        // unlock and revoke it at the same time
        unlockClosure((StgClosure*)m,&stg_MSG_NULL_info);
        break;
    }

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(cap, &sleeping_queue, tso);
        goto done;
#endif

    case BlockedOnCCall_Interruptible:
    case BlockedOnCCall:
        // A thread blocked in a ccall shouldn't be put on the run
        // queue: we only raise an exception in such a thread when it
        // is /exiting/ the call.
        tso->why_blocked = NotBlocked;
        return;

    default:
        barf("removeFromQueues: %d", tso->why_blocked);
    }

done:
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap, tso);
}

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked.  The possible blockages are MVARs, BLOCKING_QUEUEs, and
 * TSO blocked_exception queues.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary.  When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations?  We create an
 * AP_STACK for every UpdateFrame on the stack.  Entering one of these
 * AP_STACKs pushes everything from the corresponding update frame
 * upwards onto the stack.  (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_STACK object.)
 * Entering the next AP_STACK object pushes more onto the stack until we
 * reach the last AP_STACK object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack.  In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * ToDo: in THREADED_RTS mode, this function is only safe if either
 * (a) we hold all the Capabilities (eg. in GC, or if there is only
 * one Capability), or (b) we own the Capability that the TSO is
 * currently blocked on or on the run queue of.
 *
 * -------------------------------------------------------------------------- */
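
/* A worked sketch of the stack transformation described above
 * (illustrative layout only).  Given a stack like
 *
 *      <frames A>            (top)
 *      UPDATE_FRAME t1
 *      <frames B>
 *      UPDATE_FRAME t2
 *      CATCH_FRAME h
 *      ...
 *
 * raiseAsync() builds ap1 = AP_STACK<frames A>, updates t1 with ap1,
 * then builds ap2 = AP_STACK<ap1, frames B> and updates t2 with ap2,
 * and finally leaves the stack as
 *
 *      stg_enter_info        (top)
 *      raise thunk (stg_raise applied to the exception)
 *      CATCH_FRAME h
 *      ...
 *
 * so the thread resumes by entering the raise thunk, which delivers
 * the exception to the handler h in the CATCH_FRAME.
 */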

static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    StgClosure *updatee;
    nat i;

    debugTraceCap(DEBUG_sched, cap,
                  "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:stg_raisezh.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
    {
        fprintCCS_stderr(tso->prof.CCCS);
    }
#endif
    // ASSUMES: the thread is not already complete or dead, or
    // ThreadRelocated.  Upper layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete &&
           tso->what_next != ThreadKilled &&
           tso->what_next != ThreadRelocated);

    // only if we own this TSO (except that deleteThread() calls this
    // while holding all the Capabilities)
    ASSERT(tso->cap == cap);

    // wake it up
    if (tso->why_blocked != NotBlocked) {
        tso->why_blocked = NotBlocked;
        appendToRunQueue(cap,tso);
    }

    // mark it dirty; we're about to change its stack.
    dirty_TSO(cap, tso);

    sp = tso->sp;

    if (stop_here != NULL) {
        updatee = stop_here->updatee;
    } else {
        updatee = NULL;
    }

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        sp++;
    } else {
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        // CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        // current closure applied to the chunk of stack up to (but not
        // including) the update frame.  This closure becomes the "current
        // closure".  Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        // top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun  = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //         );

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update.  We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            } else {
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                updateThunk(cap, tso,
                            ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
            }

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; // no need to bump frame
        }

        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);
            return;
        }

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
            StgCatchFrame *cf = (StgCatchFrame *)frame;
            StgThunk *raise;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX;
            if ((cf->exceptions_blocked & TSO_INTERRUPTIBLE) == 0) {
                tso->flags &= ~TSO_INTERRUPTIBLE;
            } else {
                tso->flags |= TSO_INTERRUPTIBLE;
            }

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            tso->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));
            return;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
                tso->sp = frame - 2;
                // The ATOMICALLY_FRAME expects to be returned a
                // result from the transaction, which it stores in the
                // stack frame.  Hence we arrange to return a dummy
                // result, so that the GC doesn't get upset (#3578).
                // Perhaps a better way would be to have a different
                // ATOMICALLY_FRAME instance for condemned
                // transactions, but I don't fully understand the
                // interaction with STM invariants.
                tso->sp[1] = (W_)&stg_NO_TREC_closure;
                tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
                tso->what_next = ThreadRunGHC;
                return;
            }
            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // If we find a CATCH_STM_FRAME or CATCH_RETRY_FRAME (or
            // fall through from the ATOMICALLY_FRAME case above), we
            // abort the current transaction and propagate the
            // exception.  In this case (unlike ordinary exceptions)
            // we do not care whether the transaction is valid or not,
            // because its possible validity cannot have caused the
            // exception and will not be visible after the abort.

        {
            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = trec -> enclosing_trec;
            debugTraceCap(DEBUG_stm, cap,
                          "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);
            tso -> trec = outer;
            break;
        }

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);
}