/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Asynchronous exceptions
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "sm/Storage.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
#include "Schedule.h"
#include "Updates.h"
#include "STM.h"
#include "Sanity.h"
#include "Profiling.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif

static void raiseAsync (Capability *cap,
                        StgTSO *tso,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgUpdateFrame *stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target);

static void performBlockedException (Capability *cap,
                                     StgTSO *source, StgTSO *target);

/* -----------------------------------------------------------------------------
   throwToSingleThreaded

   This version of throwTo is safe to use if and only if one of the
   following holds:

   - !THREADED_RTS

   - all the other threads in the system are stopped (eg. during GC).

   - we surely own the target TSO (eg. we just took it from the
     run queue of the current capability, or we are running it).

   It doesn't cater for blocking the source thread until the exception
   has been raised.
   -------------------------------------------------------------------------- */
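
/* An illustrative call (a sketch, not code from this file): with all
 * other threads stopped, e.g. at a GC safe point, a caller that owns
 * `tso` can raise `exception` in it directly:
 *
 *     throwToSingleThreaded(cap, tso, exception);
 *
 * which satisfies the preconditions above without taking any locks.
 */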

void
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
{
    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
}

void
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
}

void
suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
}

/* -----------------------------------------------------------------------------
   throwTo

   This function may be used to throw an exception from one thread to
   another, during the course of normal execution. This is a tricky
   task: the target thread might be running on another CPU, or it
   may be blocked and could be woken up at any point by another CPU.
   We have some delicate synchronisation to do.

   There is a completely safe fallback scheme: it is always possible
   to just block the source TSO on the target TSO's blocked_exceptions
   queue. This queue is locked using lockTSO()/unlockTSO(). It is
   checked at regular intervals: before and after running a thread
   (schedule() and threadPaused() respectively), and just before GC
   (scheduleDoGC()). Activating a thread on this queue should be done
   using maybePerformBlockedException(): this is done in the context
   of the target thread, so the exception can be raised eagerly.

   This fallback scheme works even if the target thread is complete or
   killed: scheduleDoGC() will discover the blocked thread before the
   target is GC'd.

   Blocking the source thread on the target thread's blocked_exceptions
   queue is also employed when the target thread is currently blocking
   exceptions (ie. inside Control.Exception.block).

   We could use the safe fallback scheme exclusively, but that
   wouldn't be ideal: most calls to throwTo would block immediately,
   possibly until the next GC, which might require the deadlock
   detection mechanism to kick in. So we try to provide promptness
   wherever possible.

   We can promptly deliver the exception if the target thread is:

   - runnable, on the same Capability as the source thread (because
     we own the run queue and therefore the target thread).

   - blocked, and we can obtain exclusive access to it. Obtaining
     exclusive access to the thread depends on how it is blocked.

   We must also be careful not to trip over threadStackOverflow(),
   which might be moving the TSO to enlarge its stack.
   lockTSO()/unlockTSO() are used here too.

   Returns:

   THROWTO_SUCCESS    exception was raised, ok to continue

   THROWTO_BLOCKED    exception was not raised; block the source
                      thread then call throwToReleaseTarget() when
                      the source thread is properly tidied away.

   -------------------------------------------------------------------------- */
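
/* A hedged sketch of the caller's side of this protocol; the real
 * callers live elsewhere (e.g. the scheduler and killThread#), and
 * tidySourceThread() below is a hypothetical stand-in for parking the
 * source thread:
 *
 *     void *out = NULL;
 *     switch (throwTo(cap, source, target, exception, &out)) {
 *     case THROWTO_SUCCESS:
 *         break;                      // raised promptly; carry on
 *     case THROWTO_BLOCKED:
 *         tidySourceThread(source);   // hypothetical: tidy the source away
 *         throwToReleaseTarget(out);  // then unlock the target TSO
 *         break;
 *     }
 */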

nat
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception, // the exception closure
         /*[out]*/ void **out USED_IF_THREADS)
{
    StgWord status;

    // follow ThreadRelocated links in the target first
    while (target->what_next == ThreadRelocated) {
        target = target->_link;
        // No, it might be a WHITEHOLE:
        // ASSERT(get_itbl(target)->type == TSO);
    }

    debugTrace(DEBUG_sched, "throwTo: from thread %lu to thread %lu",
               (unsigned long)source->id, (unsigned long)target->id);

#ifdef DEBUG
    traceThreadStatus(DEBUG_sched, target);
#endif

    goto check_target;
retry:
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
        /* if status==NotBlocked, and target->cap == cap, then
           we own this TSO and can raise the exception.

           How do we establish this condition? Very carefully.

           Let
               P = (status == NotBlocked)
               Q = (tso->cap == cap)

           Now, if P & Q are true, then the TSO is locked and owned by
           this capability. No other OS thread can steal it.

           If P==0 and Q==1: the TSO is blocked, but attached to this
           capability, and it can be stolen by another capability.

           If P==1 and Q==0: the TSO is runnable on another
           capability. At any time, the TSO may change from runnable
           to blocked and vice versa, while it remains owned by
           another capability.

           Suppose we test like this:

              p = P
              q = Q
              if (p && q) ...

           this is defeated by another capability stealing a blocked
           TSO from us to wake it up (Schedule.c:unblockOne()). The
           other thread is doing

              Q = 0
              P = 1

           assuming arbitrary reordering, we could see this
           interleaving:

              start: P==0 && Q==1
              P = 1
              p = P
              q = Q
              Q = 0
              if (p && q) ...

           so we need a memory barrier:

              p = P
              mb()
              q = Q
              if (p && q) ...

           this avoids the problematic case. There are other cases
           to consider, but this is the tricky one.

           Note that we must be sure that unblockOne() does the
           writes in the correct order: Q before P. The memory
           barrier ensures that if we have seen the write to P, we
           have also seen the write to Q.
        */
    {
        Capability *target_cap;

        write_barrier();
        target_cap = target->cap;
        if (target_cap == cap && (target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            // Otherwise, just block on the blocked_exceptions queue
            // of the target thread. The queue will get looked at
            // soon enough: it is checked before and after running a
            // thread, and during GC.
            lockTSO(target);

            // Avoid race with threadStackOverflow, which may have
            // just moved this TSO.
            if (target->what_next == ThreadRelocated) {
                unlockTSO(target);
                target = target->_link;
                goto retry;
            }
            // check again for ThreadComplete and ThreadKilled. This
            // cooperates with scheduleHandleThreadFinished to ensure
            // that we never miss any threads that are throwing an
            // exception to a thread in the process of terminating.
            if (target->what_next == ThreadComplete
                || target->what_next == ThreadKilled) {
                unlockTSO(target);
                return THROWTO_SUCCESS;
            }
            blockedThrowTo(cap,source,target);
            *out = target;
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMVar:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure. In the threaded RTS it does.
        switch (get_itbl(mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockClosure((StgClosure *)mvar,info);
            goto retry;
        }
        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if (target->why_blocked != BlockedOnMVar
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            lockClosure((StgClosure *)target);
            blockedThrowTo(cap,source,target);
            unlockClosure((StgClosure *)mvar, info);
            *out = target;
            return THROWTO_BLOCKED; // caller releases TSO
        } else {
            removeThreadFromMVarQueue(cap, mvar, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        ACQUIRE_LOCK(&sched_mutex);
        // double checking the status after the memory barrier:
        if (target->why_blocked != BlockedOnBlackHole) {
            RELEASE_LOCK(&sched_mutex);
            goto retry;
        }

        if (target->flags & TSO_BLOCKEX) {
            lockTSO(target);
            blockedThrowTo(cap,source,target);
            RELEASE_LOCK(&sched_mutex);
            *out = target;
            return THROWTO_BLOCKED; // caller releases TSO
        } else {
            removeThreadFromQueue(cap, &blackhole_queue, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            RELEASE_LOCK(&sched_mutex);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnException:
    {
        StgTSO *target2;
        StgInfoTable *info;

        /*
          To obtain exclusive access to a BlockedOnException thread,
          we must call lockClosure() on the TSO on which it is blocked.
          Since the TSO might change underneath our feet, after we
          call lockClosure() we must check that

             (a) the closure we locked is actually a TSO
             (b) the original thread is still BlockedOnException,
             (c) the original thread is still blocked on the TSO we locked
          and (d) the target thread has not been relocated.

          We synchronise with threadStackOverflow() (which relocates
          threads) using lockClosure()/unlockClosure().
        */
        target2 = target->block_info.tso;

        info = lockClosure((StgClosure *)target2);
        if (info != &stg_TSO_info) {
            unlockClosure((StgClosure *)target2, info);
            goto retry;
        }
        if (target->what_next == ThreadRelocated) {
            target = target->_link;
            unlockTSO(target2);
            goto retry;
        }
        if (target2->what_next == ThreadRelocated) {
            target->block_info.tso = target2->_link;
            unlockTSO(target2);
            goto retry;
        }
        if (target->why_blocked != BlockedOnException
            || target->block_info.tso != target2) {
            unlockTSO(target2);
            goto retry;
        }

        /*
          Now we have exclusive rights to the target TSO...

          If it is blocking exceptions, add the source TSO to its
          blocked_exceptions queue. Otherwise, raise the exception.
        */
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            lockTSO(target);
            blockedThrowTo(cap,source,target);
            unlockTSO(target2);
            *out = target;
            return THROWTO_BLOCKED;
        } else {
            removeThreadFromQueue(cap, &target2->blocked_exceptions, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockTSO(target2);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,source,target);
            *out = target;
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall:
    case BlockedOnCCall_NoUnblockExc:
        // I don't think it's possible to acquire ownership of a
        // BlockedOnCCall thread. We just assume that the target
        // thread is blocking exceptions, and block on its
        // blocked_exceptions queue.
        lockTSO(target);
        if (target->why_blocked != BlockedOnCCall &&
            target->why_blocked != BlockedOnCCall_NoUnblockExc) {
            unlockTSO(target);
            goto retry;
        }
        blockedThrowTo(cap,source,target);
        *out = target;
        return THROWTO_BLOCKED;

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,source,target);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    default:
        barf("throwTo: unrecognised why_blocked value");
    }
    barf("throwTo");
}

// Block a TSO on another TSO's blocked_exceptions queue.
// Precondition: we hold an exclusive lock on the target TSO (this is
// complex to achieve as there's no single lock on a TSO; see
// throwTo()).
static void
blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target)
{
    debugTrace(DEBUG_sched, "throwTo: blocking on thread %lu",
               (unsigned long)target->id);
    setTSOLink(cap, source, target->blocked_exceptions);
    target->blocked_exceptions = source;
    dirty_TSO(cap,target); // we modified the blocked_exceptions queue

    source->block_info.tso = target;
    write_barrier(); // the write to block_info.tso *must* be visible
                     // if BlockedOnException is.
    source->why_blocked = BlockedOnException;
}


#ifdef THREADED_RTS
void
throwToReleaseTarget (void *tso)
{
    unlockTSO((StgTSO *)tso);
}
#endif

/* -----------------------------------------------------------------------------
   Waking up threads blocked in throwTo

   There are two ways to do this: maybePerformBlockedException() will
   perform the throwTo() for the thread at the head of the queue
   immediately, and leave the other threads on the queue.
   maybePerformBlockedException() also checks the TSO_BLOCKEX flag
   before raising an exception.

   awakenBlockedExceptionQueue() will wake up all the threads in the
   queue, but not perform any throwTo() immediately. This might be
   more appropriate when the target thread is the one actually running
   (see Exception.cmm).

   Returns: non-zero if an exception was raised, zero otherwise.
   -------------------------------------------------------------------------- */
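
/* A rough sketch of how a caller chooses between the two entry points,
 * under the description above (the surrounding code is illustrative):
 *
 *     // in schedule()/threadPaused(), around running tso:
 *     if (maybePerformBlockedException(cap, tso)) {
 *         // one queued exception was raised in tso, eagerly
 *     }
 *
 *     // when tso is the thread actually running (see Exception.cmm),
 *     // wake every blocked thrower and let each retry its throwTo():
 *     awakenBlockedExceptionQueue(cap, tso);
 */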

int
maybePerformBlockedException (Capability *cap, StgTSO *tso)
{
    StgTSO *source;

    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        if (tso->blocked_exceptions != END_TSO_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);
            return 1;
        } else {
            return 0;
        }
    }

    if (tso->blocked_exceptions != END_TSO_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTrace(DEBUG_sched, "throwTo: thread %lu has blocked exceptions "
                   "but is inside block", (unsigned long)tso->id);
    }

    if (tso->blocked_exceptions != END_TSO_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // Lock the TSO, this gives us exclusive access to the queue
        lockTSO(tso);

        // Check the queue again; it might have changed before we
        // locked it.
        if (tso->blocked_exceptions == END_TSO_QUEUE) {
            unlockTSO(tso);
            return 0;
        }

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
        source = tso->blocked_exceptions;
        performBlockedException(cap, source, tso);
        tso->blocked_exceptions = unblockOne_(cap, source,
                                              rtsFalse/*no migrate*/);
        unlockTSO(tso);
        return 1;
    }
    return 0;
}

// awakenBlockedExceptionQueue(): Just wake up the whole queue of
// blocked exceptions and let them try again.

void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    lockTSO(tso);
    awakenBlockedQueue(cap, tso->blocked_exceptions);
    tso->blocked_exceptions = END_TSO_QUEUE;
    unlockTSO(tso);
}

static void
performBlockedException (Capability *cap, StgTSO *source, StgTSO *target)
{
    StgClosure *exception;

    ASSERT(source->why_blocked == BlockedOnException);
    ASSERT(source->block_info.tso->id == target->id);
    ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
    ASSERT(((StgTSO *)source->sp[1])->id == target->id);
    // check ids not pointers, because the thread might be relocated

    exception = (StgClosure *)source->sp[2];
    throwToSingleThreaded(cap, target, exception);
    source->sp += 3;
}

/* -----------------------------------------------------------------------------
   Remove a thread from blocking queues.

   This is for use when we raise an exception in another thread, which
   may be blocked.

   Precondition: we have exclusive access to the TSO, via the same set
   of conditions as throwToSingleThreaded() (q.v.).
   -------------------------------------------------------------------------- */

static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here! We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack. We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
        removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
        goto done;

    case BlockedOnBlackHole:
        removeThreadFromQueue(cap, &blackhole_queue, tso);
        goto done;

    case BlockedOnException:
    {
        StgTSO *target = tso->block_info.tso;

        // NO: when called by threadPaused(), we probably have this
        // TSO already locked (WHITEHOLEd) because we just placed
        // ourselves on its queue.
        // ASSERT(get_itbl(target)->type == TSO);

        while (target->what_next == ThreadRelocated) {
            target = target->_link;
        }

        removeThreadFromQueue(cap, &target->blocked_exceptions, tso);
        goto done;
    }

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(cap, &sleeping_queue, tso);
        goto done;
#endif

    default:
        barf("removeFromQueues: %d", tso->why_blocked);
    }

done:
    unblockOne(cap, tso);
}

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary. When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations? We create an
 * AP_STACK for every UpdateFrame on the stack. Entering one of these
 * AP_STACKs pushes everything from the corresponding update frame
 * upwards onto the stack. (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_STACK object.)
 * Entering the next AP_STACK object pushes more onto the stack until we
 * reach the last AP_STACK object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack. In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * ToDo: in THREADED_RTS mode, this function is only safe if either
 * (a) we hold all the Capabilities (eg. in GC, or if there is only
 * one Capability), or (b) we own the Capability that the TSO is
 * currently blocked on or on the run queue of.
 *
 * -------------------------------------------------------------------------- */
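
/* A schematic picture of the freezing step (illustrative; word order
 * and sizes are simplified). If the stack between Sp and the nearest
 * update frame looks like
 *
 *     Sp -> [ fun ] [ x1 ] [ x2 ] [ UPDATE_FRAME u ] ...
 *
 * then we allocate, roughly,
 *
 *     AP_STACK { size = 2, fun = fun, payload = { x1, x2 } }
 *
 * overwrite u's updatee with it, and carry on walking the stack above
 * u with this AP_STACK as the new "current closure".
 */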

static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    StgClosure *updatee;
    nat i;

    debugTrace(DEBUG_sched,
               "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:stg_raisezh.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
    {
        fprintCCS_stderr(tso->prof.CCCS);
    }
#endif

    // mark it dirty; we're about to change its stack.
    dirty_TSO(cap, tso);

    sp = tso->sp;

    // ASSUMES: the thread is not already complete or dead. Upper
    // layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);

    if (stop_here != NULL) {
        updatee = stop_here->updatee;
    } else {
        updatee = NULL;
    }

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        sp++;
    } else {
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //    CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame. This closure becomes the "current
        //    closure". Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //         );

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update. We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            } else {
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                UPD_IND(((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
            }

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; // no need to bump frame
        }

        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);
            return;
        }

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
#ifdef PROFILING
            StgCatchFrame *cf = (StgCatchFrame *)frame;
#endif
            StgThunk *raise;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            tso->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));
            return;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
                tso->sp = frame - 2;
                // The ATOMICALLY_FRAME expects to be returned a
                // result from the transaction, which it stores in the
                // stack frame. Hence we arrange to return a dummy
                // result, so that the GC doesn't get upset (#3578).
                // Perhaps a better way would be to have a different
                // ATOMICALLY_FRAME instance for condemned
                // transactions, but I don't fully understand the
                // interaction with STM invariants.
                tso->sp[1] = (W_)&stg_NO_TREC_closure;
                tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
                tso->what_next = ThreadRunGHC;
                return;
            }
            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // If we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception. In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.

        {
            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = trec -> enclosing_trec;
            debugTrace(DEBUG_stm,
                       "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);
            tso -> trec = outer;
            break;
        }

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);
}