handle ThreadMigrating in throwTo() (#4811)
[ghc.git] / rts / RaiseAsync.c
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Asynchronous exceptions
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "sm/Storage.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
#include "Schedule.h"
#include "Updates.h"
#include "STM.h"
#include "sm/Sanity.h"
#include "Profiling.h"
#include "Messages.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif

static void raiseAsync (Capability *cap,
                        StgTSO *tso,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgUpdateFrame *stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void removeFromMVarBlockedQueue (StgTSO *tso);

static void blockedThrowTo (Capability *cap,
                            StgTSO *target, MessageThrowTo *msg);

static void throwToSendMsg (Capability *cap USED_IF_THREADS,
                            Capability *target_cap USED_IF_THREADS,
                            MessageThrowTo *msg USED_IF_THREADS);

/* -----------------------------------------------------------------------------
   throwToSingleThreaded

   This version of throwTo is safe to use if and only if one of the
   following holds:

   - !THREADED_RTS

   - all the other threads in the system are stopped (eg. during GC).

   - we surely own the target TSO (eg. we just took it from the
     run queue of the current capability, or we are running it).

   It doesn't cater for blocking the source thread until the exception
   has been raised.
   -------------------------------------------------------------------------- */
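
/* A sketch of one use of this entry point (the caller shown here is
 * hypothetical, for illustration only; the real call sites live elsewhere
 * in the RTS): when the RTS owns every Capability, e.g. during shutdown
 * via deleteAllThreads() (see the note in removeFromQueues() below), a
 * thread can be killed outright by throwing a NULL "exception":
 *
 *     throwToSingleThreaded(cap, tso, NULL);
 *         // a NULL exception makes raiseAsync() strip the whole stack
 *         // and leave a zombie (see the raiseAsync() comment below).
 */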

static void
throwToSingleThreaded__ (Capability *cap, StgTSO *tso, StgClosure *exception,
                         rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    tso = deRefTSO(tso);

    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
}

void
throwToSingleThreaded (Capability *cap, StgTSO *tso, StgClosure *exception)
{
    throwToSingleThreaded__(cap, tso, exception, rtsFalse, NULL);
}

void
throwToSingleThreaded_ (Capability *cap, StgTSO *tso, StgClosure *exception,
                        rtsBool stop_at_atomically)
{
    throwToSingleThreaded__ (cap, tso, exception, stop_at_atomically, NULL);
}

void
suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
    throwToSingleThreaded__ (cap, tso, NULL, rtsFalse, stop_here);
}

/* -----------------------------------------------------------------------------
   throwTo

   This function may be used to throw an exception from one thread to
   another, during the course of normal execution.  This is a tricky
   task: the target thread might be running on another CPU, or it
   may be blocked and could be woken up at any point by another CPU.
   We have some delicate synchronisation to do.

   The underlying scheme when multiple Capabilities are in use is
   message passing: when the target of a throwTo is on another
   Capability, we send a message (a MessageThrowTo closure) to that
   Capability.

   If the throwTo needs to block because the target TSO is masking
   exceptions (the TSO_BLOCKEX flag), then the message is placed on
   the blocked_exceptions queue attached to the target TSO.  When the
   target TSO enters the unmasked state again, it must check the
   queue.  The blocked_exceptions queue is not locked; only the
   Capability owning the TSO may modify it.

   To make things simpler for throwTo, we always create the message
   first before deciding what to do.  The message may get sent, or it
   may get attached to a TSO's blocked_exceptions queue, or the
   exception may get thrown immediately and the message dropped,
   depending on the current state of the target.

   Currently we send a message if the target belongs to another
   Capability, and it is

     - NotBlocked, BlockedOnMsgThrowTo,
       BlockedOnCCall_Interruptible

     - or it is masking exceptions (TSO_BLOCKEX)

   Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
   BlockedOnBlackHole then we acquire ownership of the TSO by locking
   its parent container (e.g. the MVar) and then raise the exception.
   We might change these cases to be more message-passing-like in the
   future.

   Returns:

   NULL               exception was raised, ok to continue

   MessageThrowTo *   exception was not raised; the source TSO
                      should now put itself in the state
                      BlockedOnMsgThrowTo, and when it is ready
                      it should unlock the message using
                      unlockClosure(msg, &stg_MSG_THROWTO_info);
                      If it decides not to raise the exception after
                      all, it can revoke it safely with
                      unlockClosure(msg, &stg_MSG_NULL_info);

   -------------------------------------------------------------------------- */
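
/* For example, a caller that wants to block until the exception has been
 * delivered might use the result roughly as follows.  This is only a sketch
 * based on the comment above (the real call sites are elsewhere in the RTS,
 * e.g. Exception.cmm); how the Capability is yielded is left abstract:
 *
 *     MessageThrowTo *m = throwTo(cap, source, target, exception);
 *     if (m != NULL) {
 *         // exception not raised yet: block on the (still locked) message
 *         source->why_blocked = BlockedOnMsgThrowTo;
 *         source->block_info.throwto = m;
 *         // ... yield the Capability; when ready to block for real,
 *         // unlock the message so the target can see it:
 *         unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info);
 *         // or, to give up on throwing the exception, revoke it instead:
 *         //     unlockClosure((StgClosure*)m, &stg_MSG_NULL_info);
 *     }
 */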

MessageThrowTo *
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception (or NULL)
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception) // the exception closure
{
    MessageThrowTo *msg;

    msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
    // message starts locked; the caller has to unlock it when it is
    // ready.
    SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
    msg->source = source;
    msg->target = target;
    msg->exception = exception;

    switch (throwToMsg(cap, msg))
    {
    case THROWTO_SUCCESS:
        return NULL;
    case THROWTO_BLOCKED:
    default:
        return msg;
    }
}


nat
throwToMsg (Capability *cap, MessageThrowTo *msg)
{
    StgWord status;
    StgTSO *target = msg->target;
    Capability *target_cap;

    goto check_target;

retry:
    write_barrier();
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    ASSERT(target != END_TSO_QUEUE);

    // follow ThreadRelocated links in the target first
    target = deRefTSO(target);

    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "throwTo: from thread %lu to thread %lu",
                  (unsigned long)msg->source->id,
                  (unsigned long)msg->target->id);

#ifdef DEBUG
    traceThreadStatus(DEBUG_sched, target);
#endif

    target_cap = target->cap;
    if (target->cap != cap) {
        throwToSendMsg(cap, target_cap, msg);
        return THROWTO_BLOCKED;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
    {
        if ((target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;
        MessageThrowTo *m;

        m = target->block_info.throwto;

        // target is local to this cap, but has sent a throwto
        // message to another cap.
        //
        // The source message is locked.  We need to revoke the
        // target's message so that we can raise the exception, so
        // we attempt to lock it.

        // There's a possibility of a deadlock if two threads are both
        // trying to throwTo each other (or more generally, a cycle of
        // threads).  To break the symmetry we compare the addresses
        // of the MessageThrowTo objects, and the one for which m <
        // msg gets to spin, while the other can only try to lock
        // once, but must then back off and unlock both before trying
        // again.
        if (m < msg) {
            i = lockClosure((StgClosure *)m);
        } else {
            i = tryLockClosure((StgClosure *)m);
            if (i == NULL) {
                // debugBelch("collision\n");
                throwToSendMsg(cap, target->cap, msg);
                return THROWTO_BLOCKED;
            }
        }

        if (i == &stg_MSG_NULL_info) {
            // we know there's a MSG_TRY_WAKEUP on the way, so we
            // might as well just do it now.  The message will
            // be a no-op when it arrives.
            unlockClosure((StgClosure*)m, i);
            tryWakeupThread_(cap, target);
            goto retry;
        }

        if (i != &stg_MSG_THROWTO_info) {
            // if it's a MSG_NULL, this TSO has been woken up by another Cap
            unlockClosure((StgClosure*)m, i);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            unlockClosure((StgClosure*)m, i);
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }

        // nobody else can wake up this TSO after we claim the message
        unlockClosure((StgClosure*)m, &stg_MSG_NULL_info);

        raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
        return THROWTO_SUCCESS;
    }

    case BlockedOnMVar:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure.  In the threaded RTS it does.
        switch (get_itbl(mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockClosure((StgClosure *)mvar,info);
            goto retry;
        }
        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if (target->why_blocked != BlockedOnMVar
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if (target->_link == END_TSO_QUEUE) {
            // the MVar operation has already completed.  There is a
            // MSG_TRY_WAKEUP on the way, but we can just wake up the
            // thread now anyway and ignore the message when it
            // arrives.
            unlockClosure((StgClosure *)mvar, info);
            tryWakeupThread_(cap, target);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_BLOCKED;
        } else {
            // revoke the MVar operation
            removeFromMVarBlockedQueue(target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        if (target->flags & TSO_BLOCKEX) {
            // BlockedOnBlackHole is not interruptible.
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            // Revoke the message by replacing it with IND.  We're not
            // locking anything here, so we might still get a TRY_WAKEUP
            // message from the owner of the blackhole some time in the
            // future, but that doesn't matter.
            ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
            OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockTSO(target);
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall_Interruptible:
#ifdef THREADED_RTS
    {
        Task *task = NULL;
        // walk suspended_ccalls to find the correct worker thread
        InCall *incall;
        for (incall = cap->suspended_ccalls; incall != NULL; incall = incall->next) {
            if (incall->suspended_tso == target) {
                task = incall->task;
                break;
            }
        }
        if (task != NULL) {
            blockedThrowTo(cap, target, msg);
            if (!((target->flags & TSO_BLOCKEX) &&
                  ((target->flags & TSO_INTERRUPTIBLE) == 0))) {
                interruptWorkerTask(task);
            }
            return THROWTO_BLOCKED;
        } else {
            debugTraceCap(DEBUG_sched, cap, "throwTo: could not find worker thread to kill");
        }
        // fall through to the BlockedOnCCall case below
    }
#endif
    case BlockedOnCCall:
        blockedThrowTo(cap,target,msg);
        return THROWTO_BLOCKED;

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    case ThreadMigrating:
        // if it is ThreadMigrating and tso->cap is ours, then it
        // *must* be migrating *to* this capability.  If it were
        // migrating away from the capability, then tso->cap would
        // point to the destination.
        //
        // There is a MSG_TRY_WAKEUP in the message queue for this thread,
        // but we can just do it preemptively:
        tryWakeupThread(cap, target);
        // and now retry, the thread should be runnable.
        goto retry;

    default:
        barf("throwTo: unrecognised why_blocked value");
    }
    barf("throwTo");
}

static void
throwToSendMsg (Capability *cap STG_UNUSED,
                Capability *target_cap USED_IF_THREADS,
                MessageThrowTo *msg USED_IF_THREADS)

{
#ifdef THREADED_RTS
    debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);

    sendMessage(cap, target_cap, (Message*)msg);
#endif
}

// Block a throwTo message on the target TSO's blocked_exceptions
// queue.  The current Capability must own the target TSO in order to
// modify the blocked_exceptions queue.
static void
blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
{
    debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
                  (unsigned long)target->id);

    ASSERT(target->cap == cap);

    msg->link = target->blocked_exceptions;
    target->blocked_exceptions = msg;
    dirty_TSO(cap,target); // we modified the blocked_exceptions queue
}

/* -----------------------------------------------------------------------------
   Waking up threads blocked in throwTo

   There are two ways to do this: maybePerformBlockedException() will
   perform the throwTo() for the thread at the head of the queue
   immediately, and leave the other threads on the queue.
   maybePerformBlockedException() also checks the TSO_BLOCKEX flag
   before raising an exception.

   awakenBlockedExceptionQueue() will wake up all the threads in the
   queue, but not perform any throwTo() immediately.  This might be
   more appropriate when the target thread is the one actually running
   (see Exception.cmm).

   Returns: non-zero if an exception was raised, zero otherwise.
   -------------------------------------------------------------------------- */
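
/* A sketch of how a caller might use maybePerformBlockedException(), based
 * only on the comment above (the real call sites live elsewhere in the RTS,
 * e.g. around Exception.cmm; the surrounding scheduler logic shown here is
 * hypothetical):
 *
 *     if (maybePerformBlockedException(cap, tso)) {
 *         // An exception was raised on tso (or its queue was flushed
 *         // because the thread had finished), so its stack and
 *         // what_next may have changed; re-inspect the thread before
 *         // running it.
 *     }
 */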

int
maybePerformBlockedException (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;

    if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
        if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);
            return 1;
        } else {
            return 0;
        }
    }

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
    }

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
    loop:
        msg = tso->blocked_exceptions;
        if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
        i = lockClosure((StgClosure*)msg);
        tso->blocked_exceptions = (MessageThrowTo*)msg->link;
        if (i == &stg_MSG_NULL_info) {
            unlockClosure((StgClosure*)msg,i);
            goto loop;
        }

        throwToSingleThreaded(cap, msg->target, msg->exception);
        unlockClosure((StgClosure*)msg,&stg_MSG_NULL_info);
        tryWakeupThread(cap, msg->source);
        return 1;
    }
    return 0;
}

// awakenBlockedExceptionQueue(): Just wake up the whole queue of
// blocked exceptions.

void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;

    for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
         msg = (MessageThrowTo*)msg->link) {
        i = lockClosure((StgClosure *)msg);
        if (i != &stg_MSG_NULL_info) {
            unlockClosure((StgClosure *)msg,&stg_MSG_NULL_info);
            tryWakeupThread(cap, msg->source);
        } else {
            unlockClosure((StgClosure *)msg,i);
        }
    }
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
}

/* -----------------------------------------------------------------------------
   Remove a thread from blocking queues.

   This is for use when we raise an exception in another thread, which
   may be blocked.

   Precondition: we have exclusive access to the TSO, via the same set
   of conditions as throwToSingleThreaded() (c.f.).
   -------------------------------------------------------------------------- */

static void
removeFromMVarBlockedQueue (StgTSO *tso)
{
    StgMVar *mvar = (StgMVar*)tso->block_info.closure;
    StgMVarTSOQueue *q = (StgMVarTSOQueue*)tso->_link;

    if (q == (StgMVarTSOQueue*)END_TSO_QUEUE) {
        // already removed from this MVar
        return;
    }

    // Assume the MVar is locked. (not assertable; sometimes it isn't
    // actually WHITEHOLE'd).

    // We want to remove the MVAR_TSO_QUEUE object from the queue.  It
    // isn't doubly-linked so we can't actually remove it; instead we
    // just overwrite it with an IND if possible and let the GC short
    // it out.  However, we have to be careful to maintain the deque
    // structure:

    if (mvar->head == q) {
        mvar->head = q->link;
        q->header.info = &stg_IND_info;
        if (mvar->tail == q) {
            mvar->tail = (StgMVarTSOQueue*)END_TSO_QUEUE;
        }
    }
    else if (mvar->tail == q) {
        // we can't replace it with an IND in this case, because then
        // we lose the tail pointer when the GC shorts out the IND.
        // So we use MSG_NULL as a kind of non-dupable indirection;
        // these are ignored by takeMVar/putMVar.
        q->header.info = &stg_MSG_NULL_info;
    }
    else {
        q->header.info = &stg_IND_info;
    }

    // revoke the MVar operation
    tso->_link = END_TSO_QUEUE;
}

static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
    case ThreadMigrating:
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here!  We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack.  We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
        removeFromMVarBlockedQueue(tso);
        goto done;

    case BlockedOnBlackHole:
        // nothing to do
        goto done;

    case BlockedOnMsgThrowTo:
    {
        MessageThrowTo *m = tso->block_info.throwto;
        // The message is locked by us, unless we got here via
        // deleteAllThreads(), in which case we own all the
        // capabilities.
        // ASSERT(m->header.info == &stg_WHITEHOLE_info);

        // unlock and revoke it at the same time
        unlockClosure((StgClosure*)m,&stg_MSG_NULL_info);
        break;
    }

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(cap, &sleeping_queue, tso);
        goto done;
#endif

    default:
        barf("removeFromQueues: %d", tso->why_blocked);
    }

done:
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap, tso);
}

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked.  The possible blockages are MVARs, BLOCKING_QUEUEs, and
 * TSO blocked_exception queues.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary.  When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations?  We create an
 * AP_STACK for every UpdateFrame on the stack.  Entering one of these
 * AP_STACKs pushes everything from the corresponding update frame
 * upwards onto the stack.  (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_STACK object.
 * Entering the next AP_STACK object pushes more onto the stack until we
 * reach the last AP_STACK object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.)
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack.  In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * ToDo: in THREADED_RTS mode, this function is only safe if either
 * (a) we hold all the Capabilities (eg. in GC, or if there is only
 * one Capability), or (b) we own the Capability that the TSO is
 * currently blocked on or on the run queue of.
 *
 * -------------------------------------------------------------------------- */
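
/* A small illustration of the UPDATE_FRAME case below (a sketch only; frame
 * layouts are simplified and the names ret, x and y are made up): a stack
 * segment
 *
 *     top ->  | ret | x | y | UPDATE_FRAME(updatee t) | ...
 *
 * is collapsed into a single AP_STACK closure left on the stack,
 *
 *     top ->  | AP_STACK { fun = ret, payload = [x, y] } | ...
 *
 * and the thunk t is updated (via updateThunk()) to point to that AP_STACK,
 * so the suspended work can be resumed later by entering the AP_STACK.
 */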

static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    StgClosure *updatee;
    nat i;

    debugTraceCap(DEBUG_sched, cap,
                  "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:stg_raisezh.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
    {
        fprintCCS_stderr(tso->prof.CCCS);
    }
#endif
    // ASSUMES: the thread is not already complete or dead, or
    // ThreadRelocated.  Upper layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete &&
           tso->what_next != ThreadKilled &&
           tso->what_next != ThreadRelocated);

    // only if we own this TSO (except that deleteThread() calls this)
    ASSERT(tso->cap == cap);

    // wake it up
    if (tso->why_blocked != NotBlocked) {
        tso->why_blocked = NotBlocked;
        appendToRunQueue(cap,tso);
    }

    // mark it dirty; we're about to change its stack.
    dirty_TSO(cap, tso);

    sp = tso->sp;

    if (stop_here != NULL) {
        updatee = stop_here->updatee;
    } else {
        updatee = NULL;
    }

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        sp++;
    } else {
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //    CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame.  This closure becomes the "current
        //    closure".  Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //         );

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update.  We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            } else {
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                updateThunk(cap, tso,
                            ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
            }

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; //no need to bump frame
        }

        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);
            return;
        }

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
            StgCatchFrame *cf = (StgCatchFrame *)frame;
            StgThunk *raise;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX;
            if ((cf->exceptions_blocked & TSO_INTERRUPTIBLE) == 0) {
                tso->flags &= ~TSO_INTERRUPTIBLE;
            } else {
                tso->flags |= TSO_INTERRUPTIBLE;
            }

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            tso->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));
            return;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
                tso->sp = frame - 2;
                // The ATOMICALLY_FRAME expects to be returned a
                // result from the transaction, which it stores in the
                // stack frame.  Hence we arrange to return a dummy
                // result, so that the GC doesn't get upset (#3578).
                // Perhaps a better way would be to have a different
                // ATOMICALLY_FRAME instance for condemned
                // transactions, but I don't fully understand the
                // interaction with STM invariants.
                tso->sp[1] = (W_)&stg_NO_TREC_closure;
                tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
                tso->what_next = ThreadRunGHC;
                return;
            }
            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // If we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception.  In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.

        {
            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = trec -> enclosing_trec;
            debugTraceCap(DEBUG_stm, cap,
                          "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);
            tso -> trec = outer;
            break;
        };

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);
}
