New asynchronous exception control API (ghc parts)
[ghc.git] / rts / RaiseAsync.c
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Asynchronous exceptions
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "sm/Storage.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
#include "Schedule.h"
#include "Updates.h"
#include "STM.h"
#include "sm/Sanity.h"
#include "Profiling.h"
#include "Messages.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif

static void raiseAsync (Capability *cap,
                        StgTSO *tso,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgUpdateFrame *stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void removeFromMVarBlockedQueue (StgTSO *tso);

static void blockedThrowTo (Capability *cap,
                            StgTSO *target, MessageThrowTo *msg);

static void throwToSendMsg (Capability *cap USED_IF_THREADS,
                            Capability *target_cap USED_IF_THREADS,
                            MessageThrowTo *msg USED_IF_THREADS);

/* -----------------------------------------------------------------------------
   throwToSingleThreaded

   This version of throwTo is safe to use if and only if one of the
   following holds:

   - !THREADED_RTS

   - all the other threads in the system are stopped (eg. during GC).

   - we surely own the target TSO (eg. we just took it from the
     run queue of the current capability, or we are running it).

   It doesn't cater for blocking the source thread until the exception
   has been raised.
   -------------------------------------------------------------------------- */
void
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
{
    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
}

void
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically)
{
    tso = deRefTSO(tso);

    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
}

void
suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
    tso = deRefTSO(tso);

    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
}

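/* Example (a sketch, not part of this file): a caller that satisfies
 * the ownership conditions above is deleteThread() in Schedule.c,
 * which runs while all Capabilities are held during shutdown, along
 * the lines of:
 *
 *     static void deleteThread (Capability *cap, StgTSO *tso)
 *     {
 *         // NULL exception means "just kill the thread"; see raiseAsync()
 *         if (tso->why_blocked != BlockedOnCCall &&
 *             tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
 *             throwToSingleThreaded(cap, tso, NULL);
 *         }
 *     }
 */
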
/* -----------------------------------------------------------------------------
   throwTo

   This function may be used to throw an exception from one thread to
   another, during the course of normal execution. This is a tricky
   task: the target thread might be running on another CPU, or it
   may be blocked and could be woken up at any point by another CPU.
   We have some delicate synchronisation to do.

   The underlying scheme when multiple Capabilities are in use is
   message passing: when the target of a throwTo is on another
   Capability, we send a message (a MessageThrowTo closure) to that
   Capability.

   If the throwTo needs to block because the target TSO is masking
   exceptions (the TSO_BLOCKEX flag), then the message is placed on
   the blocked_exceptions queue attached to the target TSO. When the
   target TSO enters the unmasked state again, it must check the
   queue. The blocked_exceptions queue is not locked; only the
   Capability owning the TSO may modify it.

   To make things simpler for throwTo, we always create the message
   first before deciding what to do. The message may get sent, or it
   may get attached to a TSO's blocked_exceptions queue, or the
   exception may get thrown immediately and the message dropped,
   depending on the current state of the target.

   Currently we send a message if the target belongs to another
   Capability, and it is

   - NotBlocked, BlockedOnMsgThrowTo,
     BlockedOnCCall

   - or it is masking exceptions (TSO_BLOCKEX)

   Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
   BlockedOnBlackHole then we acquire ownership of the TSO by locking
   its parent container (e.g. the MVar) and then raise the exception.
   We might change these cases to be more message-passing-like in the
   future.

   Returns:

   NULL               exception was raised, ok to continue

   MessageThrowTo *   exception was not raised; the source TSO
                      should now put itself in the state
                      BlockedOnMsgThrowTo, and when it is ready
                      it should unlock the message using
                      unlockClosure(msg, &stg_MSG_THROWTO_info);
                      If it decides not to raise the exception after
                      all, it can revoke it safely with
                      unlockClosure(msg, &stg_MSG_NULL_info);

   -------------------------------------------------------------------------- */

MessageThrowTo *
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception (or NULL)
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception) // the exception closure
{
    MessageThrowTo *msg;

    msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
    // message starts locked; the caller has to unlock it when it is
    // ready.
    SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
    msg->source = source;
    msg->target = target;
    msg->exception = exception;

    switch (throwToMsg(cap, msg))
    {
    case THROWTO_SUCCESS:
        return NULL;
    case THROWTO_BLOCKED:
    default:
        return msg;
    }
}
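
/* Example of the caller-side protocol described above (a sketch; the
 * real caller of throwTo is stg_killThreadzh in Exception.cmm, which
 * is written in Cmm rather than C):
 *
 *     MessageThrowTo *msg = throwTo(cap, source, target, exception);
 *     if (msg != NULL) {
 *         source->why_blocked = BlockedOnMsgThrowTo;
 *         source->block_info.throwto = msg;
 *         // publish the still-locked message so the target can see it
 *         unlockClosure((StgClosure *)msg, &stg_MSG_THROWTO_info);
 *         // ... block; when woken, the exception has been raised.
 *         // To give up instead, revoke with stg_MSG_NULL_info.
 *     }
 */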

nat
throwToMsg (Capability *cap, MessageThrowTo *msg)
{
    StgWord status;
    StgTSO *target = msg->target;
    Capability *target_cap;

    goto check_target;

retry:
    write_barrier();
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    ASSERT(target != END_TSO_QUEUE);

    // follow ThreadRelocated links in the target first
    target = deRefTSO(target);

    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "throwTo: from thread %lu to thread %lu",
                  (unsigned long)msg->source->id,
                  (unsigned long)msg->target->id);

#ifdef DEBUG
    traceThreadStatus(DEBUG_sched, target);
#endif

    target_cap = target->cap;
    if (target->cap != cap) {
        throwToSendMsg(cap, target_cap, msg);
        return THROWTO_BLOCKED;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
    {
        if ((target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;
        MessageThrowTo *m;

        m = target->block_info.throwto;

        // target is local to this cap, but has sent a throwto
        // message to another cap.
        //
        // The source message is locked. We need to revoke the
        // target's message so that we can raise the exception, so
        // we attempt to lock it.

        // There's a possibility of a deadlock if two threads are both
        // trying to throwTo each other (or more generally, a cycle of
        // threads). To break the symmetry we compare the addresses
        // of the MessageThrowTo objects, and the one for which m <
        // msg gets to spin, while the other can only try to lock
        // once, but must then back off and unlock both before trying
        // again.
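        //
        // For example (illustration): if thread A throws to B while B
        // throws to A, both arrive here holding their own locked
        // message. The side holding the higher-addressed message
        // fails its tryLockClosure(), re-sends its own message with
        // throwToSendMsg() and returns THROWTO_BLOCKED, so exactly
        // one side makes progress and the cycle is broken.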
        if (m < msg) {
            i = lockClosure((StgClosure *)m);
        } else {
            i = tryLockClosure((StgClosure *)m);
            if (i == NULL) {
                // debugBelch("collision\n");
                throwToSendMsg(cap, target->cap, msg);
                return THROWTO_BLOCKED;
            }
        }

        if (i == &stg_MSG_NULL_info) {
            // we know there's a MSG_TRY_WAKEUP on the way, so we
            // might as well just do it now. The message will
            // be a no-op when it arrives.
            unlockClosure((StgClosure*)m, i);
            tryWakeupThread_(cap, target);
            goto retry;
        }

        if (i != &stg_MSG_THROWTO_info) {
            // it's no longer a MSG_THROWTO (MSG_NULL was handled
            // above), so the situation has changed under us: retry
            unlockClosure((StgClosure*)m, i);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            unlockClosure((StgClosure*)m, i);
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }

        // nobody else can wake up this TSO after we claim the message
        unlockClosure((StgClosure*)m, &stg_MSG_NULL_info);

        raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
        return THROWTO_SUCCESS;
    }

    case BlockedOnMVar:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure. In the threaded RTS it does.
        switch (get_itbl(mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockClosure((StgClosure *)mvar,info);
            goto retry;
        }
        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if (target->why_blocked != BlockedOnMVar
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if (target->_link == END_TSO_QUEUE) {
            // the MVar operation has already completed. There is a
            // MSG_TRY_WAKEUP on the way, but we can just wake up the
            // thread now anyway and ignore the message when it
            // arrives.
            unlockClosure((StgClosure *)mvar, info);
            tryWakeupThread_(cap, target);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_BLOCKED;
        } else {
            // revoke the MVar operation
            removeFromMVarBlockedQueue(target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        if (target->flags & TSO_BLOCKEX) {
            // BlockedOnBlackHole is not interruptible.
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            // Revoke the message by replacing it with IND. We're not
            // locking anything here, so we might still get a TRY_WAKEUP
            // message from the owner of the blackhole some time in the
            // future, but that doesn't matter.
            ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
            OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockTSO(target);
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall:
    case BlockedOnCCall_NoUnblockExc:
        blockedThrowTo(cap,target,msg);
        return THROWTO_BLOCKED;

#ifndef THREADED_RTS
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    default:
        barf("throwTo: unrecognised why_blocked value");
    }
    barf("throwTo");
}

static void
throwToSendMsg (Capability *cap STG_UNUSED,
                Capability *target_cap USED_IF_THREADS,
                MessageThrowTo *msg USED_IF_THREADS)
{
#ifdef THREADED_RTS
    debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);

    sendMessage(cap, target_cap, (Message*)msg);
#endif
}

// Block a throwTo message on the target TSO's blocked_exceptions
// queue. The current Capability must own the target TSO in order to
// modify the blocked_exceptions queue.
static void
blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
{
    debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
                  (unsigned long)target->id);

    ASSERT(target->cap == cap);

    msg->link = target->blocked_exceptions;
    target->blocked_exceptions = msg;
    dirty_TSO(cap,target); // we modified the blocked_exceptions queue
}

/* -----------------------------------------------------------------------------
   Waking up threads blocked in throwTo

   There are two ways to do this: maybePerformBlockedException() will
   perform the throwTo() for the thread at the head of the queue
   immediately, and leave the other threads on the queue.
   maybePerformBlockedException() also checks the TSO_BLOCKEX flag
   before raising an exception.

   awakenBlockedExceptionQueue() will wake up all the threads in the
   queue, but not perform any throwTo() immediately. This might be
   more appropriate when the target thread is the one actually running
   (see Exception.cmm).

   Returns: non-zero if an exception was raised, zero otherwise.
   -------------------------------------------------------------------------- */

int
maybePerformBlockedException (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;

    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);
            return 1;
        } else {
            return 0;
        }
    }

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
    }

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
    loop:
        msg = tso->blocked_exceptions;
        if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
        i = lockClosure((StgClosure*)msg);
        tso->blocked_exceptions = (MessageThrowTo*)msg->link;
        if (i == &stg_MSG_NULL_info) {
            unlockClosure((StgClosure*)msg,i);
            goto loop;
        }

        throwToSingleThreaded(cap, msg->target, msg->exception);
        unlockClosure((StgClosure*)msg,&stg_MSG_NULL_info);
        tryWakeupThread(cap, msg->source);
        return 1;
    }
    return 0;
}
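
/* Example (a sketch): this is called when a thread leaves the masked
 * state; e.g. stg_unmaskAsyncExceptionszh in Exception.cmm does,
 * roughly:
 *
 *     if (maybePerformBlockedException(cap, CurrentTSO)) {
 *         // an exception was raised: the TSO's stack has been
 *         // rewritten to enter the handler, so just resume it.
 *     }
 */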

// awakenBlockedExceptionQueue(): Just wake up the whole queue of
// blocked exceptions.

void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;

    for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
         msg = (MessageThrowTo*)msg->link) {
        i = lockClosure((StgClosure *)msg);
        if (i != &stg_MSG_NULL_info) {
            unlockClosure((StgClosure *)msg,&stg_MSG_NULL_info);
            tryWakeupThread(cap, msg->source);
        } else {
            unlockClosure((StgClosure *)msg,i);
        }
    }
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
}

/* -----------------------------------------------------------------------------
   Remove a thread from blocking queues.

   This is for use when we raise an exception in another thread, which
   may be blocked.

   Precondition: we have exclusive access to the TSO, via the same set
   of conditions as throwToSingleThreaded() (c.f.).
   -------------------------------------------------------------------------- */

static void
removeFromMVarBlockedQueue (StgTSO *tso)
{
    StgMVar *mvar = (StgMVar*)tso->block_info.closure;
    StgMVarTSOQueue *q = (StgMVarTSOQueue*)tso->_link;

    if (q == (StgMVarTSOQueue*)END_TSO_QUEUE) {
        // already removed from this MVar
        return;
    }

    // Assume the MVar is locked. (not assertable; sometimes it isn't
    // actually WHITEHOLE'd).

    // We want to remove the MVAR_TSO_QUEUE object from the queue. It
    // isn't doubly-linked so we can't actually remove it; instead we
    // just overwrite it with an IND if possible and let the GC short
    // it out. However, we have to be careful to maintain the deque
    // structure:

    if (mvar->head == q) {
        mvar->head = q->link;
        q->header.info = &stg_IND_info;
        if (mvar->tail == q) {
            mvar->tail = (StgMVarTSOQueue*)END_TSO_QUEUE;
        }
    }
    else if (mvar->tail == q) {
        // we can't replace it with an IND in this case, because then
        // we lose the tail pointer when the GC shorts out the IND.
        // So we use MSG_NULL as a kind of non-dupable indirection;
        // these are ignored by takeMVar/putMVar.
        q->header.info = &stg_MSG_NULL_info;
    }
    else {
        q->header.info = &stg_IND_info;
    }

    // revoke the MVar operation
    tso->_link = END_TSO_QUEUE;
}
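
/* Illustration (a sketch, derived from the code above): for a queue
 *
 *     head -> q1 -> q2 -> q3 <- tail
 *
 * removing q1 advances head and overwrites q1 with IND; removing q2
 * overwrites it with IND and lets the GC unlink it; removing q3
 * overwrites it with MSG_NULL instead, because shorting out an IND
 * at the tail would leave mvar->tail dangling.
 */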

static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
    case ThreadMigrating:
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here! We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack. We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
        removeFromMVarBlockedQueue(tso);
        goto done;

    case BlockedOnBlackHole:
        // nothing to do
        goto done;

    case BlockedOnMsgThrowTo:
    {
        MessageThrowTo *m = tso->block_info.throwto;
        // The message is locked by us, unless we got here via
        // deleteAllThreads(), in which case we own all the
        // capabilities.
        // ASSERT(m->header.info == &stg_WHITEHOLE_info);

        // unlock and revoke it at the same time
        unlockClosure((StgClosure*)m,&stg_MSG_NULL_info);
        break;
    }

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(cap, &sleeping_queue, tso);
        goto done;
#endif

    default:
        barf("removeFromQueues: %d", tso->why_blocked);
    }

done:
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap, tso);
}

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked. The possible blockages are MVARs, BLOCKING_QUEUEs, and
 * TSO blocked_exception queues.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary. When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations? We create an
 * AP_STACK for every UpdateFrame on the stack. Entering one of these
 * AP_STACKs pushes everything from the corresponding update frame
 * upwards onto the stack. (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_STACK object.)
 * Entering the next AP_STACK object pushes more onto the stack until we
 * reach the last AP_STACK object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack. In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * ToDo: in THREADED_RTS mode, this function is only safe if either
 * (a) we hold all the Capabilities (eg. in GC, or if there is only
 * one Capability), or (b) we own the Capability that the TSO is
 * currently blocked on or on the run queue of.
 *
 * -------------------------------------------------------------------------- */
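
/* Illustration (a sketch, not from the original source): suppose the
 * stack looks like this, growing downwards:
 *
 *     fun                          <- tso->sp
 *     ... stack chunk A ...
 *     UPDATE_FRAME (updatee = t1)
 *     ... stack chunk B ...
 *     UPDATE_FRAME (updatee = t2)
 *     CATCH_FRAME  (handler = h)
 *
 * The loop below builds ap1 = AP_STACK(fun, chunk A) and updates t1
 * with ap1, then ap2 = AP_STACK(ap1, chunk B) and updates t2 with
 * ap2. When it reaches the CATCH_FRAME it leaves the stack as
 *
 *     stg_enter_info               <- tso->sp
 *     raise(exception) thunk
 *     CATCH_FRAME  (handler = h)
 *
 * so the thread enters the raise thunk and the handler h catches the
 * exception.
 */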

static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    StgClosure *updatee;
    nat i;

    debugTraceCap(DEBUG_sched, cap,
                  "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:stg_raisezh.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
    {
        fprintCCS_stderr(tso->prof.CCCS);
    }
#endif
    // ASSUMES: the thread is not already complete or dead, or
    // ThreadRelocated. Upper layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete &&
           tso->what_next != ThreadKilled &&
           tso->what_next != ThreadRelocated);

    // only if we own this TSO (except that deleteThread() calls this)
    ASSERT(tso->cap == cap);

    // wake it up
    if (tso->why_blocked != NotBlocked) {
        tso->why_blocked = NotBlocked;
        appendToRunQueue(cap,tso);
    }

    // mark it dirty; we're about to change its stack.
    dirty_TSO(cap, tso);

    sp = tso->sp;

    if (stop_here != NULL) {
        updatee = stop_here->updatee;
    } else {
        updatee = NULL;
    }

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        sp++;
    } else {
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //    CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame. This closure becomes the "current
        //    closure". Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun  = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //         );

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update. We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            } else {
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                updateThunk(cap, tso,
                            ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
            }

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; //no need to bump frame
        }

        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);
            return;
        }

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
            StgCatchFrame *cf = (StgCatchFrame *)frame;
            StgThunk *raise;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX;
            if ((cf->exceptions_blocked & TSO_INTERRUPTIBLE) == 0) {
                tso->flags &= ~TSO_INTERRUPTIBLE;
            } else {
                tso->flags |= TSO_INTERRUPTIBLE;
            }

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            tso->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));
            return;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso->trec);
                tso->sp = frame - 2;
                // The ATOMICALLY_FRAME expects to be returned a
                // result from the transaction, which it stores in the
                // stack frame. Hence we arrange to return a dummy
                // result, so that the GC doesn't get upset (#3578).
                // Perhaps a better way would be to have a different
                // ATOMICALLY_FRAME instance for condemned
                // transactions, but I don't fully understand the
                // interaction with STM invariants.
                tso->sp[1] = (W_)&stg_NO_TREC_closure;
                tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
                tso->what_next = ThreadRunGHC;
                return;
            }
            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // If we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception. In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.

        {
            StgTRecHeader *trec = tso->trec;
            StgTRecHeader *outer = trec->enclosing_trec;
            debugTraceCap(DEBUG_stm, cap,
                          "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);
            tso->trec = outer;
            break;
        }

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);
}