Fix for derefing ThreadRelocated TSOs in MVar operations
ghc.git: rts/RaiseAsync.c
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Asynchronous exceptions
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "sm/Storage.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
#include "Schedule.h"
#include "Updates.h"
#include "STM.h"
#include "sm/Sanity.h"
#include "Profiling.h"
#include "Messages.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif

static void raiseAsync (Capability *cap,
                        StgTSO *tso,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgUpdateFrame *stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void removeFromMVarBlockedQueue (StgTSO *tso);

static void blockedThrowTo (Capability *cap,
                            StgTSO *target, MessageThrowTo *msg);

static void throwToSendMsg (Capability *cap USED_IF_THREADS,
                            Capability *target_cap USED_IF_THREADS,
                            MessageThrowTo *msg USED_IF_THREADS);

/* -----------------------------------------------------------------------------
   throwToSingleThreaded

   This version of throwTo is safe to use if and only if one of the
   following holds:

   - !THREADED_RTS

   - all the other threads in the system are stopped (eg. during GC).

   - we surely own the target TSO (eg. we just took it from the
     run queue of the current capability, or we are running it).

   It doesn't cater for blocking the source thread until the exception
   has been raised.
   -------------------------------------------------------------------------- */
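
/* Illustrative only (not part of the original file): the third
 * condition above is how deleteThread() in Schedule.c uses this
 * function during shutdown, when the caller holds every Capability
 * and therefore owns every TSO.  A minimal sketch of that caller,
 * assuming that context:
 *
 *     static void deleteThread (Capability *cap, StgTSO *tso)
 *     {
 *         // threads blocked in a foreign call cannot be interrupted
 *         if (tso->why_blocked != BlockedOnCCall &&
 *             tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
 *             throwToSingleThreaded(cap, tso, NULL);  // NULL kills the thread
 *         }
 *     }
 */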

void
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
{
    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
}

void
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically)
{
    tso = deRefTSO(tso);

    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
}

void
suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
    tso = deRefTSO(tso);

    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
}

/* -----------------------------------------------------------------------------
   throwTo

   This function may be used to throw an exception from one thread to
   another, during the course of normal execution.  This is a tricky
   task: the target thread might be running on another CPU, or it
   may be blocked and could be woken up at any point by another CPU.
   We have some delicate synchronisation to do.

   The underlying scheme when multiple Capabilities are in use is
   message passing: when the target of a throwTo is on another
   Capability, we send a message (a MessageThrowTo closure) to that
   Capability.

   If the throwTo needs to block because the target TSO is masking
   exceptions (the TSO_BLOCKEX flag), then the message is placed on
   the blocked_exceptions queue attached to the target TSO.  When the
   target TSO enters the unmasked state again, it must check the
   queue.  The blocked_exceptions queue is not locked; only the
   Capability owning the TSO may modify it.

   To make things simpler for throwTo, we always create the message
   first before deciding what to do.  The message may get sent, or it
   may get attached to a TSO's blocked_exceptions queue, or the
   exception may get thrown immediately and the message dropped,
   depending on the current state of the target.

   Currently we send a message if the target belongs to another
   Capability, and it is

   - NotBlocked, BlockedOnMsgThrowTo,
     BlockedOnCCall

   - or it is masking exceptions (TSO_BLOCKEX)

   Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
   BlockedOnBlackHole then we acquire ownership of the TSO by locking
   its parent container (e.g. the MVar) and then raise the exception.
   We might change these cases to be more message-passing-like in the
   future.

   Returns:

   NULL               exception was raised, ok to continue

   MessageThrowTo *   exception was not raised; the source TSO
                      should now put itself in the state
                      BlockedOnMsgThrowTo, and when it is ready
                      it should unlock the message using
                      unlockClosure(msg, &stg_MSG_THROWTO_info);
                      If it decides not to raise the exception after
                      all, it can revoke it safely with
                      unlockClosure(msg, &stg_MSG_NULL_info);

   -------------------------------------------------------------------------- */
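
/* A hedged, caller-side sketch of the blocking protocol described
 * above.  The real caller is stg_killThreadzh in PrimOps.cmm; the
 * sequencing below is illustrative C, not the actual Cmm code:
 *
 *     MessageThrowTo *msg = throwTo(cap, source, target, exception);
 *     if (msg != NULL) {
 *         // not raised yet: block the source thread on the message,
 *         // then unlock it so the target can complete or revoke it
 *         source->why_blocked = BlockedOnMsgThrowTo;
 *         source->block_info.throwto = msg;
 *         unlockClosure((StgClosure *)msg, &stg_MSG_THROWTO_info);
 *         // ... return to the scheduler and wait to be woken up ...
 *     }
 */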

MessageThrowTo *
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception (or NULL)
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception) // the exception closure
{
    MessageThrowTo *msg;

    msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
    // message starts locked; the caller has to unlock it when it is
    // ready.
    SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
    msg->source = source;
    msg->target = target;
    msg->exception = exception;

    switch (throwToMsg(cap, msg))
    {
    case THROWTO_SUCCESS:
        return NULL;
    case THROWTO_BLOCKED:
    default:
        return msg;
    }
}


nat
throwToMsg (Capability *cap, MessageThrowTo *msg)
{
    StgWord status;
    StgTSO *target = msg->target;
    Capability *target_cap;

    goto check_target;

retry:
    write_barrier();
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    ASSERT(target != END_TSO_QUEUE);

    // follow ThreadRelocated links in the target first
    target = deRefTSO(target);

    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "throwTo: from thread %lu to thread %lu",
                  (unsigned long)msg->source->id,
                  (unsigned long)msg->target->id);

#ifdef DEBUG
    traceThreadStatus(DEBUG_sched, target);
#endif

    target_cap = target->cap;
    if (target->cap != cap) {
        throwToSendMsg(cap, target_cap, msg);
        return THROWTO_BLOCKED;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
    {
        if ((target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;
        MessageThrowTo *m;

        m = target->block_info.throwto;

        // target is local to this cap, but has sent a throwto
        // message to another cap.
        //
        // The source message is locked.  We need to revoke the
        // target's message so that we can raise the exception, so
        // we attempt to lock it.

        // There's a possibility of a deadlock if two threads are both
        // trying to throwTo each other (or more generally, a cycle of
        // threads).  To break the symmetry we compare the addresses
        // of the MessageThrowTo objects, and the one for which m <
        // msg gets to spin, while the other can only try to lock
        // once, but must then back off and unlock both before trying
        // again.
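        //
        // For example (illustrative, not from the original source):
        // if thread A throws to B while B throws to A, each side
        // holds its own message locked and needs the other's.
        // Comparing message addresses imposes a global order: the
        // side holding the lower-addressed message spins in
        // lockClosure(), while the other uses tryLockClosure() and
        // backs off on failure, so at least one side always makes
        // progress.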
        if (m < msg) {
            i = lockClosure((StgClosure *)m);
        } else {
            i = tryLockClosure((StgClosure *)m);
            if (i == NULL) {
                // debugBelch("collision\n");
                throwToSendMsg(cap, target->cap, msg);
                return THROWTO_BLOCKED;
            }
        }

        if (i == &stg_MSG_NULL_info) {
            // we know there's a MSG_TRY_WAKEUP on the way, so we
            // might as well just do it now.  The message will
            // be a no-op when it arrives.
            unlockClosure((StgClosure*)m, i);
            tryWakeupThread_(cap, target);
            goto retry;
        }
        if (i != &stg_MSG_THROWTO_info) {
            // not a MSG_THROWTO after all (the message must have been
            // revoked in the meantime), so just start again
            unlockClosure((StgClosure*)m, i);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            unlockClosure((StgClosure*)m, i);
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }

        // nobody else can wake up this TSO after we claim the message
        unlockClosure((StgClosure*)m, &stg_MSG_NULL_info);

        raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
        return THROWTO_SUCCESS;
    }

    case BlockedOnMVar:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure.  In the threaded RTS it does.
        switch (get_itbl(mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockClosure((StgClosure *)mvar,info);
            goto retry;
        }
        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if (target->why_blocked != BlockedOnMVar
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if (target->_link == END_TSO_QUEUE) {
            // the MVar operation has already completed.  There is a
            // MSG_TRY_WAKEUP on the way, but we can just wake up the
            // thread now anyway and ignore the message when it
            // arrives.
            unlockClosure((StgClosure *)mvar, info);
            tryWakeupThread_(cap, target);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_BLOCKED;
        } else {
            // revoke the MVar operation
            removeFromMVarBlockedQueue(target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        // Revoke the message by replacing it with IND.  We're not
        // locking anything here, so we might still get a TRY_WAKEUP
        // message from the owner of the blackhole some time in the
        // future, but that doesn't matter.
        ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
        OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
        raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
        return THROWTO_SUCCESS;
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockTSO(target);
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall:
    case BlockedOnCCall_NoUnblockExc:
        blockedThrowTo(cap,target,msg);
        return THROWTO_BLOCKED;

#ifndef THREADED_RTS
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    default:
        barf("throwTo: unrecognised why_blocked value");
    }
    barf("throwTo");
}

static void
throwToSendMsg (Capability *cap STG_UNUSED,
                Capability *target_cap USED_IF_THREADS,
                MessageThrowTo *msg USED_IF_THREADS)
{
#ifdef THREADED_RTS
    debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);

    sendMessage(cap, target_cap, (Message*)msg);
#endif
}

// Block a throwTo message on the target TSO's blocked_exceptions
// queue.  The current Capability must own the target TSO in order to
// modify the blocked_exceptions queue.
static void
blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
{
    debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
                  (unsigned long)target->id);

    ASSERT(target->cap == cap);

    msg->link = target->blocked_exceptions;
    target->blocked_exceptions = msg;
    dirty_TSO(cap,target); // we modified the blocked_exceptions queue
}

/* -----------------------------------------------------------------------------
   Waking up threads blocked in throwTo

   There are two ways to do this: maybePerformBlockedException() will
   perform the throwTo() for the thread at the head of the queue
   immediately, and leave the other threads on the queue.
   maybePerformBlockedException() also checks the TSO_BLOCKEX flag
   before raising an exception.

   awakenBlockedExceptionQueue() will wake up all the threads in the
   queue, but not perform any throwTo() immediately.  This might be
   more appropriate when the target thread is the one actually running
   (see Exception.cmm).

   Returns: non-zero if an exception was raised, zero otherwise.
   -------------------------------------------------------------------------- */
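
/* A hedged sketch of the intended call pattern (the real call sites
 * live in Schedule.c and Exception.cmm; this fragment is illustrative
 * only):
 *
 *     // scheduler, about to resume 'tso', which may have just unmasked:
 *     if (maybePerformBlockedException(cap, tso)) {
 *         // an exception was raised on tso: its stack now has the
 *         // handler application (or 'raise') on top, so running the
 *         // thread as usual will execute the handler.
 *     }
 */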

int
maybePerformBlockedException (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;

    if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
        if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);
            return 1;
        } else {
            return 0;
        }
    }

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
    }

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
    loop:
        msg = tso->blocked_exceptions;
        if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
        i = lockClosure((StgClosure*)msg);
        tso->blocked_exceptions = (MessageThrowTo*)msg->link;
        if (i == &stg_MSG_NULL_info) {
            unlockClosure((StgClosure*)msg,i);
            goto loop;
        }

        throwToSingleThreaded(cap, msg->target, msg->exception);
        unlockClosure((StgClosure*)msg,&stg_MSG_NULL_info);
        tryWakeupThread(cap, msg->source);
        return 1;
    }
    return 0;
}

// awakenBlockedExceptionQueue(): Just wake up the whole queue of
// blocked exceptions.

void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;

    for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
         msg = (MessageThrowTo*)msg->link) {
        i = lockClosure((StgClosure *)msg);
        if (i != &stg_MSG_NULL_info) {
            unlockClosure((StgClosure *)msg,&stg_MSG_NULL_info);
            tryWakeupThread(cap, msg->source);
        } else {
            unlockClosure((StgClosure *)msg,i);
        }
    }
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
}

/* -----------------------------------------------------------------------------
   Remove a thread from blocking queues.

   This is for use when we raise an exception in another thread, which
   may be blocked.

   Precondition: we have exclusive access to the TSO, via the same set
   of conditions as throwToSingleThreaded() (c.f.).
   -------------------------------------------------------------------------- */

static void
removeFromMVarBlockedQueue (StgTSO *tso)
{
    StgMVar *mvar = (StgMVar*)tso->block_info.closure;
    StgMVarTSOQueue *q = (StgMVarTSOQueue*)tso->_link;

    if (q == (StgMVarTSOQueue*)END_TSO_QUEUE) {
        // already removed from this MVar
        return;
    }

    // Assume the MVar is locked. (not assertable; sometimes it isn't
    // actually WHITEHOLE'd).

    // We want to remove the MVAR_TSO_QUEUE object from the queue.  It
    // isn't doubly-linked so we can't actually remove it; instead we
    // just overwrite it with an IND if possible and let the GC short
    // it out.  However, we have to be careful to maintain the deque
    // structure:

    if (mvar->head == q) {
        mvar->head = q->link;
        q->header.info = &stg_IND_info;
        if (mvar->tail == q) {
            mvar->tail = (StgMVarTSOQueue*)END_TSO_QUEUE;
        }
    }
    else if (mvar->tail == q) {
        // we can't replace it with an IND in this case, because then
        // we lose the tail pointer when the GC shorts out the IND.
        // So we use MSG_NULL as a kind of non-dupable indirection;
        // these are ignored by takeMVar/putMVar.
        q->header.info = &stg_MSG_NULL_info;
    }
    else {
        q->header.info = &stg_IND_info;
    }

    // revoke the MVar operation
    tso->_link = END_TSO_QUEUE;
}
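
/* Illustration (not from the original source) of the three cases
 * above, for a queue  head -> q1 -> q2 -> q3 <- tail  when the TSO's
 * queue node is:
 *
 *   q1 (head):   head := q1->link, and q1 becomes an IND, which the
 *                GC shorts out
 *   q3 (tail):   q3 becomes a MSG_NULL rather than an IND, so the
 *                tail pointer survives GC; takeMVar/putMVar skip it
 *   q2 (middle): q2 becomes an IND and is shorted out of the chain
 *                by the GC
 */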

static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
    case ThreadMigrating:
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here!  We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack.  We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
        removeFromMVarBlockedQueue(tso);
        goto done;

    case BlockedOnBlackHole:
        // nothing to do
        goto done;

    case BlockedOnMsgThrowTo:
    {
        MessageThrowTo *m = tso->block_info.throwto;
        // The message is locked by us, unless we got here via
        // deleteAllThreads(), in which case we own all the
        // capabilities.
        // ASSERT(m->header.info == &stg_WHITEHOLE_info);

        // unlock and revoke it at the same time
        unlockClosure((StgClosure*)m,&stg_MSG_NULL_info);
        break;
    }

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(cap, &sleeping_queue, tso);
        goto done;
#endif

    default:
        barf("removeFromQueues: %d", tso->why_blocked);
    }

done:
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap, tso);
}

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked.  The possible blockages are MVARs, BLOCKING_QUEUESs, and
 * TSO blocked_exception queues.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary.  When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations?  We create an
 * AP_STACK for every UpdateFrame on the stack.  Entering one of these
 * AP_STACKs pushes everything from the corresponding update frame
 * upwards onto the stack.  (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_STACK object.
 * Entering the next AP_STACK object pushes more onto the stack until we
 * reach the last AP_STACK object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.)
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack.  In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * ToDo: in THREADED_RTS mode, this function is only safe if either
 * (a) we hold all the Capabilities (eg. in GC, or if there is only
 * one Capability), or (b) we own the Capability that the TSO is
 * currently blocked on or on the run queue of.
 *
 * -------------------------------------------------------------------------- */
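
/* Illustration (not from the original source) of the stack-freezing
 * step, for a stack with two update frames (top of stack at the left,
 * older frames to the right):
 *
 *     before:  c | frames1 | UPD(u1) | frames2 | UPD(u2) | ...
 *
 *     ap1 = AP_STACK { fun = c,   payload = frames1 };  u1 := ap1
 *     ap2 = AP_STACK { fun = ap1, payload = frames2 };  u2 := ap2
 *
 * Entering ap2 later pushes frames2 and enters ap1; entering ap1
 * pushes frames1 and enters c, rebuilding the stack exactly as it
 * was when the exception was raised.
 */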

static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    StgClosure *updatee;
    nat i;

    debugTraceCap(DEBUG_sched, cap,
                  "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:stg_raisezh.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
    {
        fprintCCS_stderr(tso->prof.CCCS);
    }
#endif
    // ASSUMES: the thread is not already complete or dead, or
    // ThreadRelocated.  Upper layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete &&
           tso->what_next != ThreadKilled &&
           tso->what_next != ThreadRelocated);

    // only if we own this TSO (except that deleteThread() calls this)
    ASSERT(tso->cap == cap);

    // wake it up
    if (tso->why_blocked != NotBlocked) {
        tso->why_blocked = NotBlocked;
        appendToRunQueue(cap,tso);
    }

    // mark it dirty; we're about to change its stack.
    dirty_TSO(cap, tso);

    sp = tso->sp;

    if (stop_here != NULL) {
        updatee = stop_here->updatee;
    } else {
        updatee = NULL;
    }

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        sp++;
    } else {
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        // CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        // current closure applied to the chunk of stack up to (but not
        // including) the update frame.  This closure becomes the "current
        // closure".  Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        // top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //         );

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update.  We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            } else {
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                updateThunk(cap, tso,
                            ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
            }

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; //no need to bump frame
        }

        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);
            return;
        }

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
#ifdef PROFILING
            StgCatchFrame *cf = (StgCatchFrame *)frame;
#endif
            StgThunk *raise;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            tso->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));
            return;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso->trec);
                tso->sp = frame - 2;
                // The ATOMICALLY_FRAME expects to be returned a
                // result from the transaction, which it stores in the
                // stack frame.  Hence we arrange to return a dummy
                // result, so that the GC doesn't get upset (#3578).
                // Perhaps a better way would be to have a different
                // ATOMICALLY_FRAME instance for condemned
                // transactions, but I don't fully understand the
                // interaction with STM invariants.
                tso->sp[1] = (W_)&stg_NO_TREC_closure;
                tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
                tso->what_next = ThreadRunGHC;
                return;
            }
            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // If we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception.  In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.
        {
            StgTRecHeader *trec = tso->trec;
            StgTRecHeader *outer = trec->enclosing_trec;
            debugTraceCap(DEBUG_stm, cap,
                          "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);
            tso->trec = outer;
            break;
        }

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);
}