/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Asynchronous exceptions
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "sm/Storage.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
#include "Schedule.h"
#include "Updates.h"
#include "STM.h"
#include "sm/Sanity.h"
#include "Profiling.h"
#include "Messages.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif

static void raiseAsync (Capability *cap,
                        StgTSO *tso,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgUpdateFrame *stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void blockedThrowTo (Capability *cap,
                            StgTSO *target, MessageThrowTo *msg);

static void throwToSendMsg (Capability *cap USED_IF_THREADS,
                            Capability *target_cap USED_IF_THREADS,
                            MessageThrowTo *msg USED_IF_THREADS);

static void performBlockedException (Capability *cap, MessageThrowTo *msg);

/* -----------------------------------------------------------------------------
   throwToSingleThreaded

   This version of throwTo is safe to use if and only if one of the
   following holds:

   - !THREADED_RTS

   - all the other threads in the system are stopped (eg. during GC).

   - we surely own the target TSO (eg. we just took it from the
     run queue of the current capability, or we are running it).

   It doesn't cater for blocking the source thread until the exception
   has been raised.
   -------------------------------------------------------------------------- */
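
/* An illustrative sketch (not part of the API; it assumes the conditions
 * above hold because we just took 'tso' from our own run queue):
 *
 *     StgTSO *tso = popRunQueue(cap);       // we now surely own tso
 *     throwToSingleThreaded(cap, tso, ex);  // raise 'ex' in tso
 *
 * where 'ex' is some exception closure supplied by the caller.  This is
 * essentially what deleteThread() in Schedule.c does, with a NULL
 * exception, to kill a thread outright.
 */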

void
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
{
    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
}

void
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically)
{
    tso = deRefTSO(tso);

    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
}

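// suspendComputation() is used by the duplicate-computation logic in
// ThreadPaused.c: instead of raising an exception it strips the stack
// back to the given update frame 'stop_here', suspending the
// intervening computation in AP_STACK closures (see raiseAsync()
// below).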
void
suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
    tso = deRefTSO(tso);

    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
}

/* -----------------------------------------------------------------------------
   throwTo

   This function may be used to throw an exception from one thread to
   another, during the course of normal execution.  This is a tricky
   task: the target thread might be running on another CPU, or it
   may be blocked and could be woken up at any point by another CPU.
   We have some delicate synchronisation to do.

   The underlying scheme when multiple Capabilities are in use is
   message passing: when the target of a throwTo is on another
   Capability, we send a message (a MessageThrowTo closure) to that
   Capability.

   If the throwTo needs to block because the target TSO is masking
   exceptions (the TSO_BLOCKEX flag), then the message is placed on
   the blocked_exceptions queue attached to the target TSO.  When the
   target TSO enters the unmasked state again, it must check the
   queue.  The blocked_exceptions queue is not locked; only the
   Capability owning the TSO may modify it.

   To make things simpler for throwTo, we always create the message
   first before deciding what to do.  The message may get sent, or it
   may get attached to a TSO's blocked_exceptions queue, or the
   exception may get thrown immediately and the message dropped,
   depending on the current state of the target.

   Currently we send a message if the target belongs to another
   Capability, and it is

   - NotBlocked, BlockedOnMsgWakeup, BlockedOnMsgThrowTo,
     BlockedOnCCall

   - or it is masking exceptions (TSO_BLOCKEX)

   Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
   BlockedOnBlackHole then we acquire ownership of the TSO by locking
   its parent container (e.g. the MVar) and then raise the exception.
   We might change these cases to be more message-passing-like in the
   future.

   Returns:

   NULL               exception was raised, ok to continue

   MessageThrowTo *   exception was not raised; the source TSO
                      should now put itself in the state
                      BlockedOnMsgThrowTo, and when it is ready
                      it should unlock the message using
                      unlockClosure(msg, &stg_MSG_THROWTO_info);
                      If it decides not to raise the exception after
                      all, it can revoke it safely with
                      unlockClosure(msg, &stg_IND_info);

   -------------------------------------------------------------------------- */
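
/* Caller-side sketch (illustrative only; cf. the real caller,
 * stg_killThreadzh in Exception.cmm, which does this in Cmm):
 *
 *     MessageThrowTo *msg = throwTo(cap, source, target, exception);
 *     if (msg == NULL) {
 *         // the exception was raised; carry on
 *     } else {
 *         // block 'source' on the message, then unlock it as
 *         // described above:
 *         source->why_blocked = BlockedOnMsgThrowTo;
 *         source->block_info.throwto = msg;
 *         unlockClosure((StgClosure *)msg, &stg_MSG_THROWTO_info);
 *         // ...the scheduler wakes 'source' when the message is
 *         // either performed or revoked.
 *     }
 */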

MessageThrowTo *
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception (or NULL)
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception) // the exception closure
{
    MessageThrowTo *msg;

    msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
    // message starts locked; the caller has to unlock it when it is
    // ready.
    SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
    msg->source    = source;
    msg->target    = target;
    msg->exception = exception;

    switch (throwToMsg(cap, msg))
    {
    case THROWTO_SUCCESS:
        return NULL;
    case THROWTO_BLOCKED:
    default:
        return msg;
    }
}


nat
throwToMsg (Capability *cap, MessageThrowTo *msg)
{
    StgWord status;
    StgTSO *target = msg->target;
    Capability *target_cap;

    goto check_target;

retry:
    write_barrier();
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    ASSERT(target != END_TSO_QUEUE);

    // follow ThreadRelocated links in the target first
    target = deRefTSO(target);

    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "throwTo: from thread %lu to thread %lu",
                  (unsigned long)msg->source->id,
                  (unsigned long)msg->target->id);

#ifdef DEBUG
    traceThreadStatus(DEBUG_sched, target);
#endif

    target_cap = target->cap;
    if (target->cap != cap) {
        throwToSendMsg(cap, target_cap, msg);
        return THROWTO_BLOCKED;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
    case BlockedOnMsgWakeup:
        /* if status==NotBlocked, and target->cap == cap, then
           we own this TSO and can raise the exception.

           How do we establish this condition?  Very carefully.

           Let
               P = (status == NotBlocked)
               Q = (tso->cap == cap)

           Now, if P & Q are true, then the TSO is locked and owned by
           this capability.  No other OS thread can steal it.

           If P==0 and Q==1: the TSO is blocked, but attached to this
           capability, and it can be stolen by another capability.

           If P==1 and Q==0: the TSO is runnable on another
           capability.  At any time, the TSO may change from runnable
           to blocked and vice versa, while it remains owned by
           another capability.

           Suppose we test like this:

              p = P
              q = Q
              if (p && q) ...

           this is defeated by another capability stealing a blocked
           TSO from us to wake it up (Schedule.c:unblockOne()).  The
           other thread is doing

              Q = 0
              P = 1

           assuming arbitrary reordering, we could see this
           interleaving:

              start: P==0 && Q==1
              P = 1
              p = P
              q = Q
              Q = 0
              if (p && q) ...

           so we need a memory barrier:

              p = P
              mb()
              q = Q
              if (p && q) ...

           this avoids the problematic case.  There are other cases
           to consider, but this is the tricky one.

           Note that we must be sure that unblockOne() does the
           writes in the correct order: Q before P.  The memory
           barrier ensures that if we have seen the write to P, we
           have also seen the write to Q.
        */
    {
        write_barrier();
        if ((target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;
        MessageThrowTo *m;

        m = target->block_info.throwto;

        // target is local to this cap, but has sent a throwto
        // message to another cap.
        //
        // The source message is locked.  We need to revoke the
        // target's message so that we can raise the exception, so
        // we attempt to lock it.

        // There's a possibility of a deadlock if two threads are both
        // trying to throwTo each other (or more generally, a cycle of
        // threads).  To break the symmetry we compare the addresses
        // of the MessageThrowTo objects, and the one for which m <
        // msg gets to spin, while the other can only try to lock
        // once, but must then back off and unlock both before trying
        // again.
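        //
        // For example: if thread A's message lives at address 0x100
        // and thread B's at 0x200, then A (msg==0x100, m==0x200) may
        // only tryLockClosure() and must back off on failure, while B
        // (msg==0x200, m==0x100) may spin in lockClosure().  At most
        // one side of any cycle spins, so the cycle cannot deadlock.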
        if (m < msg) {
            i = lockClosure((StgClosure *)m);
        } else {
            i = tryLockClosure((StgClosure *)m);
            if (i == NULL) {
//              debugBelch("collision\n");
                throwToSendMsg(cap, target->cap, msg);
                return THROWTO_BLOCKED;
            }
        }

        if (i != &stg_MSG_THROWTO_info) {
            // if it's an IND, this TSO has been woken up by another Cap
            unlockClosure((StgClosure*)m, i);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            unlockClosure((StgClosure*)m, i);
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }

        // nobody else can wake up this TSO after we claim the message
        unlockClosure((StgClosure*)m, &stg_IND_info);

        raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
        return THROWTO_SUCCESS;
    }

    case BlockedOnMVar:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure.  In the threaded RTS it does.
        switch (get_itbl(mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockClosure((StgClosure *)mvar,info);
            goto retry;
        }
        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if (target->why_blocked != BlockedOnMVar
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_BLOCKED;
        } else {
            removeThreadFromMVarQueue(cap, mvar, target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            if (info == &stg_MVAR_CLEAN_info) {
                dirty_MVAR(&cap->r,(StgClosure*)mvar);
            }
            unlockClosure((StgClosure *)mvar, &stg_MVAR_DIRTY_info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        // Revoke the message by replacing it with IND.  We're not
        // locking anything here, so we might still get a TRY_WAKEUP
        // message from the owner of the blackhole some time in the
        // future, but that doesn't matter.
        ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
        OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
        raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
        return THROWTO_SUCCESS;
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockTSO(target);
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall:
    case BlockedOnCCall_NoUnblockExc:
        blockedThrowTo(cap,target,msg);
        return THROWTO_BLOCKED;

#ifndef THREADED_RTS
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    default:
        barf("throwTo: unrecognised why_blocked value");
    }
    barf("throwTo");
}

static void
throwToSendMsg (Capability *cap STG_UNUSED,
                Capability *target_cap USED_IF_THREADS,
                MessageThrowTo *msg USED_IF_THREADS)

{
#ifdef THREADED_RTS
    debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);

    sendMessage(cap, target_cap, (Message*)msg);
#endif
}

// Block a throwTo message on the target TSO's blocked_exceptions
// queue.  The current Capability must own the target TSO in order to
// modify the blocked_exceptions queue.
static void
blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
{
    debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
                  (unsigned long)target->id);

    ASSERT(target->cap == cap);

    msg->link = target->blocked_exceptions;
    target->blocked_exceptions = msg;
    dirty_TSO(cap,target); // we modified the blocked_exceptions queue
}

/* -----------------------------------------------------------------------------
   Waking up threads blocked in throwTo

   There are two ways to do this: maybePerformBlockedException() will
   perform the throwTo() for the thread at the head of the queue
   immediately, and leave the other threads on the queue.
   maybePerformBlockedException() also checks the TSO_BLOCKEX flag
   before raising an exception.

   awakenBlockedExceptionQueue() will wake up all the threads in the
   queue, but not perform any throwTo() immediately.  This might be
   more appropriate when the target thread is the one actually running
   (see Exception.cmm).

   Returns: non-zero if an exception was raised, zero otherwise.
   -------------------------------------------------------------------------- */
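
/* Illustrative call sites (a sketch; see Exception.cmm and Schedule.c
 * for the real ones):
 *
 *     // when a thread leaves the masked state (see Exception.cmm):
 *     if (maybePerformBlockedException(cap, tso)) {
 *         // an exception is now being raised in 'tso'
 *     }
 *
 *     // when 'tso' is about to terminate, let the blocked throwers
 *     // wake up and retry:
 *     awakenBlockedExceptionQueue(cap, tso);
 */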

int
maybePerformBlockedException (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;

    if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
        if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);
            return 1;
        } else {
            return 0;
        }
    }

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
    }

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
    loop:
        msg = tso->blocked_exceptions;
        if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
        i = lockClosure((StgClosure*)msg);
        tso->blocked_exceptions = (MessageThrowTo*)msg->link;
        if (i == &stg_IND_info) {
            unlockClosure((StgClosure*)msg,i);
            goto loop;
        }

        performBlockedException(cap, msg);
        unblockOne_(cap, msg->source, rtsFalse/*no migrate*/);
        unlockClosure((StgClosure*)msg,&stg_IND_info);
        return 1;
    }
    return 0;
}

// awakenBlockedExceptionQueue(): Just wake up the whole queue of
// blocked exceptions and let them try again.

void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;

    for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
         msg = (MessageThrowTo*)msg->link) {
        i = lockClosure((StgClosure *)msg);
        if (i != &stg_IND_info) {
            unblockOne_(cap, msg->source, rtsFalse/*no migrate*/);
        }
        unlockClosure((StgClosure *)msg,i);
    }
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
}

static void
performBlockedException (Capability *cap, MessageThrowTo *msg)
{
    StgTSO *source;

    source = msg->source;

    ASSERT(source->why_blocked == BlockedOnMsgThrowTo);
    ASSERT(source->block_info.closure == (StgClosure *)msg);
    ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
    ASSERT(((StgTSO *)source->sp[1])->id == msg->target->id);
    // check ids not pointers, because the thread might be relocated

    throwToSingleThreaded(cap, msg->target, msg->exception);
    source->sp += 3;
}

/* -----------------------------------------------------------------------------
   Remove a thread from blocking queues.

   This is for use when we raise an exception in another thread, which
   may be blocked.

   Precondition: we have exclusive access to the TSO, via the same set
   of conditions as throwToSingleThreaded() (c.f.).
   -------------------------------------------------------------------------- */

static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here!  We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack.  We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
        removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
        // we aren't doing a write barrier here: the MVar is supposed to
        // be already locked, so replacing the info pointer would unlock it.
        goto done;

    case BlockedOnBlackHole:
        // nothing to do
        goto done;

    case BlockedOnMsgWakeup:
    {
        // kill the message, atomically:
        OVERWRITE_INFO(tso->block_info.wakeup, &stg_IND_info);
        break;
    }

    case BlockedOnMsgThrowTo:
    {
        MessageThrowTo *m = tso->block_info.throwto;
        // The message is locked by us, unless we got here via
        // deleteAllThreads(), in which case we own all the
        // capabilities.
        // ASSERT(m->header.info == &stg_WHITEHOLE_info);

        // unlock and revoke it at the same time
        unlockClosure((StgClosure*)m,&stg_IND_info);
        break;
    }

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(cap, &sleeping_queue, tso);
        goto done;
#endif

    default:
        barf("removeFromQueues: %d", tso->why_blocked);
    }

done:
    unblockOne(cap, tso);
}

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked.  The possible blockages are MVARs, BLOCKING_QUEUESs, and
 * TSO blocked_exception queues.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary.  When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations?  We create an
 * AP_STACK for every UpdateFrame on the stack.  Entering one of these
 * AP_STACKs pushes everything from the corresponding update frame
 * upwards onto the stack.  (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_STACK object.)
 * Entering the next AP_STACK object pushes more onto the stack until we
 * reach the last AP_STACK object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack.  In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * ToDo: in THREADED_RTS mode, this function is only safe if either
 * (a) we hold all the Capabilities (eg. in GC, or if there is only
 * one Capability), or (b) we own the Capability that the TSO is
 * currently blocked on or on the run queue of.
 *
 * -------------------------------------------------------------------------- */
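
/* A worked example (illustrative only).  Suppose the target's stack is,
 * innermost first:
 *
 *         ...live computation for thunk t2...
 *         UPDATE_FRAME (t2)
 *         ...live computation for thunk t1...
 *         UPDATE_FRAME (t1)
 *         CATCH_FRAME (handler h)
 *         ...
 *
 * raiseAsync() freezes each stack chunk above an update frame into an
 * AP_STACK closure and updates the corresponding thunk (t2, then t1)
 * with it, so demanding t1 or t2 later replays the suspended work.
 * On reaching the CATCH_FRAME it leaves the stack as
 *
 *         raise(exception)     -- a THUNK, entered when the thread resumes
 *         CATCH_FRAME (handler h)
 *         ...
 *
 * so resuming the thread raises the exception, which the CATCH_FRAME
 * below it then catches, running h.
 */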

static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    StgClosure *updatee;
    nat i;

    debugTraceCap(DEBUG_sched, cap,
                  "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:stg_raisezh.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
    {
        fprintCCS_stderr(tso->prof.CCCS);
    }
#endif
    // ASSUMES: the thread is not already complete or dead, or
    // ThreadRelocated.  Upper layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete &&
           tso->what_next != ThreadKilled &&
           tso->what_next != ThreadRelocated);

    // only if we own this TSO (except that deleteThread() calls this
    // via deleteAllThreads(), in which case we own all the capabilities)
    ASSERT(tso->cap == cap);

    // wake it up
    if (tso->why_blocked != NotBlocked && tso->why_blocked != BlockedOnMsgWakeup) {
        tso->why_blocked = NotBlocked;
        appendToRunQueue(cap,tso);
    }

    // mark it dirty; we're about to change its stack.
    dirty_TSO(cap, tso);

    sp = tso->sp;

    if (stop_here != NULL) {
        updatee = stop_here->updatee;
    } else {
        updatee = NULL;
    }

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        sp++;
    } else {
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //    CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame.  This closure becomes the "current
        //    closure".  Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun  = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //         );

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update.  We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            } else {
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                updateThunk(cap, tso,
                            ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
            }

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; //no need to bump frame
        }

        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);
            return;
        }

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
#ifdef PROFILING
            StgCatchFrame *cf = (StgCatchFrame *)frame;
#endif
            StgThunk *raise;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            tso->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));
            return;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
                tso->sp = frame - 2;
                // The ATOMICALLY_FRAME expects to be returned a
                // result from the transaction, which it stores in the
                // stack frame.  Hence we arrange to return a dummy
                // result, so that the GC doesn't get upset (#3578).
                // Perhaps a better way would be to have a different
                // ATOMICALLY_FRAME instance for condemned
                // transactions, but I don't fully understand the
                // interaction with STM invariants.
                tso->sp[1] = (W_)&stg_NO_TREC_closure;
                tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
                tso->what_next = ThreadRunGHC;
                return;
            }
            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // If we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception.  In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.

        {
            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = trec -> enclosing_trec;
            debugTraceCap(DEBUG_stm, cap,
                          "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);
            tso -> trec = outer;
            break;
        }

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);
}