rts/RaiseAsync.c
1 /* ---------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2006
4 *
5 * Asynchronous exceptions
6 *
7 * --------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11
12 #include "sm/Storage.h"
13 #include "Threads.h"
14 #include "Trace.h"
15 #include "RaiseAsync.h"
16 #include "Schedule.h"
17 #include "Updates.h"
18 #include "STM.h"
19 #include "sm/Sanity.h"
20 #include "Profiling.h"
21 #include "Messages.h"
22 #if defined(mingw32_HOST_OS)
23 #include "win32/IOManager.h"
24 #endif
25
26 static void blockedThrowTo (Capability *cap,
27 StgTSO *target, MessageThrowTo *msg);
28
29 static void removeFromQueues(Capability *cap, StgTSO *tso);
30
31 static void removeFromMVarBlockedQueue (StgTSO *tso);
32
33 static void throwToSendMsg (Capability *cap USED_IF_THREADS,
34 Capability *target_cap USED_IF_THREADS,
35 MessageThrowTo *msg USED_IF_THREADS);
36
37 /* -----------------------------------------------------------------------------
38 throwToSingleThreaded
39
40 This version of throwTo is safe to use if and only if one of the
41 following holds:
42
43 - !THREADED_RTS
44
45 - all the other threads in the system are stopped (eg. during GC).
46
47 - we surely own the target TSO (eg. we just took it from the
48 run queue of the current capability, or we are running it).
49
50 It doesn't cater for blocking the source thread until the exception
51 has been raised.
52 -------------------------------------------------------------------------- */
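/* For illustration only -- a hypothetical sketch, not a real call site.
 * The third condition above holds for a thread we have just taken off
 * our own run queue, so we may raise the exception in it directly
 * (popRunQueue/pushOnRunQueue are from Schedule.h; `exception' stands
 * for any exception closure):
 *
 *     StgTSO *tso = popRunQueue(cap);        // we now own tso
 *     throwToSingleThreaded(cap, tso, exception);
 *     pushOnRunQueue(cap, tso);              // make it runnable again
 */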
53
54 static void
55 throwToSingleThreaded__ (Capability *cap, StgTSO *tso, StgClosure *exception,
56 bool stop_at_atomically, StgUpdateFrame *stop_here)
57 {
58 // Thread already dead?
59 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
60 return;
61 }
62
63 // Remove it from any blocking queues
64 removeFromQueues(cap,tso);
65
66 raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
67 }
68
69 void
70 throwToSingleThreaded (Capability *cap, StgTSO *tso, StgClosure *exception)
71 {
72 throwToSingleThreaded__(cap, tso, exception, false, NULL);
73 }
74
75 void
76 throwToSingleThreaded_ (Capability *cap, StgTSO *tso, StgClosure *exception,
77 bool stop_at_atomically)
78 {
79 throwToSingleThreaded__ (cap, tso, exception, stop_at_atomically, NULL);
80 }
81
82 void // cannot return a different TSO
83 suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
84 {
85 throwToSingleThreaded__ (cap, tso, NULL, false, stop_here);
86 }
87
88 /* -----------------------------------------------------------------------------
89 throwToSelf
90
91 Useful for throwing an async exception in a thread from the
92 runtime. It handles unlocking the throwto message returned by
93 throwTo().
94
95 Note [Throw to self when masked]
96
97 When a StackOverflow occurs while the thread is masked, we want to
98 defer the exception to when the thread becomes unmasked/hits an
99 interruptible point. We already have a mechanism for doing this,
100 the blocked_exceptions list, but the use here is a bit unusual,
101 because an exception is normally only added to this list upon
102 an asynchronous 'throwTo' call (with all of the relevant
103 multithreaded nonsense). Morally, a stack overflow should be an
104 asynchronous exception sent by a thread to itself, and it should
105 have the same semantics. But there are a few key differences:
106
107 - If you actually tried to send an asynchronous exception to
108 yourself using throwTo, the exception would actually immediately
109 be delivered. This is because throwTo itself is considered an
110 interruptible point, so the exception is always deliverable. Thus,
111 ordinarily, we never end up with a message to oneself in the
112 blocked_exceptions queue.
113
114 - In the case of a StackOverflow, we don't actually care about the
115 wakeup semantics; when an exception is delivered, the thread that
116 originally threw the exception should be woken up, since throwTo
117 blocks until the exception is successfully thrown. Fortunately,
118 it is harmless to wake up a thread that doesn't actually need waking
119 up, e.g. ourselves.
120
121 - No synchronization is necessary, because we own the TSO and the
122 capability. You can observe this by tracing through the execution
123 of throwTo. We skip synchronizing the message and inter-capability
124 communication.
125
126 We think this doesn't break any invariants, but do be careful!
127 -------------------------------------------------------------------------- */
128
129 void
130 throwToSelf (Capability *cap, StgTSO *tso, StgClosure *exception)
131 {
132 MessageThrowTo *m;
133
134 m = throwTo(cap, tso, tso, exception);
135
136 if (m != NULL) {
137 // throwTo leaves it locked
138 unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info);
139 }
140 }
141
142 /* -----------------------------------------------------------------------------
143 throwTo
144
145 This function may be used to throw an exception from one thread to
146 another, during the course of normal execution. This is a tricky
147 task: the target thread might be running on another CPU, or it
148 may be blocked and could be woken up at any point by another CPU.
149 We have some delicate synchronisation to do.
150
151 The underlying scheme when multiple Capabilities are in use is
152 message passing: when the target of a throwTo is on another
153 Capability, we send a message (a MessageThrowTo closure) to that
154 Capability.
155
156 If the throwTo needs to block because the target TSO is masking
157 exceptions (the TSO_BLOCKEX flag), then the message is placed on
158 the blocked_exceptions queue attached to the target TSO. When the
159 target TSO enters the unmasked state again, it must check the
160 queue. The blocked_exceptions queue is not locked; only the
161 Capability owning the TSO may modify it.
162
163 To make things simpler for throwTo, we always create the message
164 first before deciding what to do. The message may get sent, or it
165 may get attached to a TSO's blocked_exceptions queue, or the
166 exception may get thrown immediately and the message dropped,
167 depending on the current state of the target.
168
169 Currently we send a message if the target belongs to another
170 Capability, and it is
171
172 - NotBlocked, BlockedOnMsgThrowTo,
173 BlockedOnCCall_Interruptible
174
175 - or it is masking exceptions (TSO_BLOCKEX)
176
177 Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
178 BlockedOnBlackHole then we acquire ownership of the TSO by locking
179 its parent container (e.g. the MVar) and then raise the exception.
180 We might change these cases to be more message-passing-like in the
181 future.
182
183 Returns:
184
185 NULL: exception was raised, ok to continue
186
187 MessageThrowTo *: exception was not raised; the source TSO
188 should now put itself in the state
189 BlockedOnMsgThrowTo, and when it is ready
190 it should unlock the message using
191 unlockClosure(msg, &stg_MSG_THROWTO_info);
192 If it decides not to raise the exception after
193 all, it can revoke it safely with
194 unlockClosure(msg, &stg_MSG_NULL_info);
195
196 -------------------------------------------------------------------------- */
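/* A rough caller-side sketch of this protocol (illustrative only -- the
 * real external caller lives in Exception.cmm, and throwToSelf below;
 * `source', `target' and `exception' are placeholders):
 *
 *     MessageThrowTo *msg = throwTo(cap, source, target, exception);
 *     if (msg == NULL) {
 *         // the exception has already been raised in the target
 *     } else {
 *         // we must wait for the target to accept the exception
 *         source->why_blocked = BlockedOnMsgThrowTo;
 *         source->block_info.throwto = msg;
 *         // ... tidy up the source thread's state, then publish the
 *         // message by unlocking it:
 *         unlockClosure((StgClosure *)msg, &stg_MSG_THROWTO_info);
 *         // (or revoke it instead with &stg_MSG_NULL_info)
 *     }
 */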
197
198 MessageThrowTo *
199 throwTo (Capability *cap, // the Capability we hold
200 StgTSO *source, // the TSO sending the exception (or NULL)
201 StgTSO *target, // the TSO receiving the exception
202 StgClosure *exception) // the exception closure
203 {
204 MessageThrowTo *msg;
205
206 msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
207 // the message starts locked; see below
208 SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
209 msg->source = source;
210 msg->target = target;
211 msg->exception = exception;
212
213 switch (throwToMsg(cap, msg))
214 {
215 case THROWTO_SUCCESS:
216 // unlock the message now, otherwise we leave a WHITEHOLE in
217 // the heap (#6103)
218 SET_HDR(msg, &stg_MSG_THROWTO_info, CCS_SYSTEM);
219 return NULL;
220
221 case THROWTO_BLOCKED:
222 default:
223 // the caller will unlock the message when it is ready. We
224 // cannot unlock it yet, because the calling thread will need
225 // to tidy up its state first.
226 return msg;
227 }
228 }
229
230
231 uint32_t
232 throwToMsg (Capability *cap, MessageThrowTo *msg)
233 {
234 StgWord status;
235 StgTSO *target = msg->target;
236 Capability *target_cap;
237
238 goto check_target;
239
240 retry:
241 write_barrier();
242 debugTrace(DEBUG_sched, "throwTo: retrying...");
243
244 check_target:
245 ASSERT(target != END_TSO_QUEUE);
246
247 // Thread already dead?
248 if (target->what_next == ThreadComplete
249 || target->what_next == ThreadKilled) {
250 return THROWTO_SUCCESS;
251 }
252
253 debugTraceCap(DEBUG_sched, cap,
254 "throwTo: from thread %lu to thread %lu",
255 (unsigned long)msg->source->id,
256 (unsigned long)msg->target->id);
257
258 #if defined(DEBUG)
259 traceThreadStatus(DEBUG_sched, target);
260 #endif
261
262 target_cap = target->cap;
263 if (target->cap != cap) {
264 throwToSendMsg(cap, target_cap, msg);
265 return THROWTO_BLOCKED;
266 }
267
268 status = target->why_blocked;
269
270 switch (status) {
271 case NotBlocked:
272 {
273 if ((target->flags & TSO_BLOCKEX) == 0) {
274 // It's on our run queue and not blocking exceptions
275 raiseAsync(cap, target, msg->exception, false, NULL);
276 return THROWTO_SUCCESS;
277 } else {
278 blockedThrowTo(cap,target,msg);
279 return THROWTO_BLOCKED;
280 }
281 }
282
283 case BlockedOnMsgThrowTo:
284 {
285 const StgInfoTable *i;
286 MessageThrowTo *m;
287
288 m = target->block_info.throwto;
289
290 // target is local to this cap, but has sent a throwto
291 // message to another cap.
292 //
293 // The source message is locked. We need to revoke the
294 // target's message so that we can raise the exception, so
295 // we attempt to lock it.
296
297 // There's a possibility of a deadlock if two threads are both
298 // trying to throwTo each other (or more generally, a cycle of
299 // threads). To break the symmetry we compare the addresses
300 // of the MessageThrowTo objects, and the one for which m <
301 // msg gets to spin, while the other can only try to lock
302 // once, but must then back off and unlock both before trying
303 // again.
304 if (m < msg) {
305 i = lockClosure((StgClosure *)m);
306 } else {
307 i = tryLockClosure((StgClosure *)m);
308 if (i == NULL) {
309 // debugBelch("collision\n");
310 throwToSendMsg(cap, target->cap, msg);
311 return THROWTO_BLOCKED;
312 }
313 }
314
315 if (i == &stg_MSG_NULL_info) {
316 // we know there's a MSG_TRY_WAKEUP on the way, so we
317 // might as well just do it now. The message will
318 // be a no-op when it arrives.
319 unlockClosure((StgClosure*)m, i);
320 tryWakeupThread(cap, target);
321 goto retry;
322 }
323
324 if (i != &stg_MSG_THROWTO_info) {
325 // if it's a MSG_NULL, this TSO has been woken up by another Cap
326 unlockClosure((StgClosure*)m, i);
327 goto retry;
328 }
329
330 if ((target->flags & TSO_BLOCKEX) &&
331 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
332 unlockClosure((StgClosure*)m, i);
333 blockedThrowTo(cap,target,msg);
334 return THROWTO_BLOCKED;
335 }
336
337 // nobody else can wake up this TSO after we claim the message
338 doneWithMsgThrowTo(m);
339
340 raiseAsync(cap, target, msg->exception, false, NULL);
341 return THROWTO_SUCCESS;
342 }
343
344 case BlockedOnMVar:
345 case BlockedOnMVarRead:
346 {
347 /*
348 To establish ownership of this TSO, we need to acquire a
349 lock on the MVar that it is blocked on.
350 */
351 StgMVar *mvar;
352 StgInfoTable *info USED_IF_THREADS;
353
354 mvar = (StgMVar *)target->block_info.closure;
355
356 // ASSUMPTION: tso->block_info must always point to a
357 // closure. In the threaded RTS it does.
358 switch (get_itbl((StgClosure *)mvar)->type) {
359 case MVAR_CLEAN:
360 case MVAR_DIRTY:
361 break;
362 default:
363 goto retry;
364 }
365
366 info = lockClosure((StgClosure *)mvar);
367
368 // we have the MVar, let's check whether the thread
369 // is still blocked on the same MVar.
370 if ((target->why_blocked != BlockedOnMVar && target->why_blocked != BlockedOnMVarRead)
371 || (StgMVar *)target->block_info.closure != mvar) {
372 unlockClosure((StgClosure *)mvar, info);
373 goto retry;
374 }
375
376 if (target->_link == END_TSO_QUEUE) {
377 // the MVar operation has already completed. There is a
378 // MSG_TRY_WAKEUP on the way, but we can just wake up the
379 // thread now anyway and ignore the message when it
380 // arrives.
381 unlockClosure((StgClosure *)mvar, info);
382 tryWakeupThread(cap, target);
383 goto retry;
384 }
385
386 if ((target->flags & TSO_BLOCKEX) &&
387 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
388 blockedThrowTo(cap,target,msg);
389 unlockClosure((StgClosure *)mvar, info);
390 return THROWTO_BLOCKED;
391 } else {
392 // revoke the MVar operation
393 removeFromMVarBlockedQueue(target);
394 raiseAsync(cap, target, msg->exception, false, NULL);
395 unlockClosure((StgClosure *)mvar, info);
396 return THROWTO_SUCCESS;
397 }
398 }
399
400 case BlockedOnBlackHole:
401 {
402 if (target->flags & TSO_BLOCKEX) {
403 // BlockedOnBlackHole is not interruptible.
404 blockedThrowTo(cap,target,msg);
405 return THROWTO_BLOCKED;
406 } else {
407 // Revoke the message by replacing it with IND. We're not
408 // locking anything here, so we might still get a TRY_WAKEUP
409 // message from the owner of the blackhole some time in the
410 // future, but that doesn't matter.
411 ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
412 OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
413 raiseAsync(cap, target, msg->exception, false, NULL);
414 return THROWTO_SUCCESS;
415 }
416 }
417
418 case BlockedOnSTM:
419 lockTSO(target);
420 // Unblocking BlockedOnSTM threads requires the TSO to be
421 // locked; see STM.c:unpark_tso().
422 if (target->why_blocked != BlockedOnSTM) {
423 unlockTSO(target);
424 goto retry;
425 }
426 if ((target->flags & TSO_BLOCKEX) &&
427 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
428 blockedThrowTo(cap,target,msg);
429 unlockTSO(target);
430 return THROWTO_BLOCKED;
431 } else {
432 raiseAsync(cap, target, msg->exception, false, NULL);
433 unlockTSO(target);
434 return THROWTO_SUCCESS;
435 }
436
437 case BlockedOnCCall_Interruptible:
438 #if defined(THREADED_RTS)
439 {
440 Task *task = NULL;
441 // walk suspended_ccalls to find the correct worker thread
442 InCall *incall;
443 for (incall = cap->suspended_ccalls; incall != NULL; incall = incall->next) {
444 if (incall->suspended_tso == target) {
445 task = incall->task;
446 break;
447 }
448 }
449 if (task != NULL) {
450 blockedThrowTo(cap, target, msg);
451 if (!((target->flags & TSO_BLOCKEX) &&
452 ((target->flags & TSO_INTERRUPTIBLE) == 0))) {
453 interruptWorkerTask(task);
454 }
455 return THROWTO_BLOCKED;
456 } else {
457 debugTraceCap(DEBUG_sched, cap, "throwTo: could not find worker thread to kill");
458 }
459 // fall to next
460 }
461 #endif
462 /* fallthrough */
463 case BlockedOnCCall:
464 blockedThrowTo(cap,target,msg);
465 return THROWTO_BLOCKED;
466
467 #if !defined(THREADED_RTS)
468 case BlockedOnRead:
469 case BlockedOnWrite:
470 case BlockedOnDelay:
471 #if defined(mingw32_HOST_OS)
472 case BlockedOnDoProc:
473 #endif
474 if ((target->flags & TSO_BLOCKEX) &&
475 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
476 blockedThrowTo(cap,target,msg);
477 return THROWTO_BLOCKED;
478 } else {
479 removeFromQueues(cap,target);
480 raiseAsync(cap, target, msg->exception, false, NULL);
481 return THROWTO_SUCCESS;
482 }
483 #endif
484
485 case ThreadMigrating:
486 // if it is ThreadMigrating and tso->cap is ours, then it
487 // *must* be migrating *to* this capability. If it were
488 // migrating away from the capability, then tso->cap would
489 // point to the destination.
490 //
491 // There is a MSG_WAKEUP in the message queue for this thread,
492 // but we can just do it preemptively:
493 tryWakeupThread(cap, target);
494 // and now retry, the thread should be runnable.
495 goto retry;
496
497 default:
498 barf("throwTo: unrecognised why_blocked (%d)", target->why_blocked);
499 }
500 barf("throwTo");
501 }
502
503 static void
504 throwToSendMsg (Capability *cap STG_UNUSED,
505 Capability *target_cap USED_IF_THREADS,
506 MessageThrowTo *msg USED_IF_THREADS)
507
508 {
509 #if defined(THREADED_RTS)
510 debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);
511
512 sendMessage(cap, target_cap, (Message*)msg);
513 #endif
514 }
515
516 // Block a throwTo message on the target TSO's blocked_exceptions
517 // queue. The current Capability must own the target TSO in order to
518 // modify the blocked_exceptions queue.
519 void
520 blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
521 {
522 debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
523 (unsigned long)target->id);
524
525 ASSERT(target->cap == cap);
526
527 msg->link = target->blocked_exceptions;
528 target->blocked_exceptions = msg;
529 dirty_TSO(cap,target); // we modified the blocked_exceptions queue
530 }
531
532 /* -----------------------------------------------------------------------------
533 Waking up threads blocked in throwTo
534
535 There are two ways to do this: maybePerformBlockedException() will
536 perform the throwTo() for the thread at the head of the queue
537 immediately, and leave the other threads on the queue.
538 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
539 before raising an exception.
540
541 awakenBlockedExceptionQueue() will wake up all the threads in the
542 queue, but not perform any throwTo() immediately. This might be
543 more appropriate when the target thread is the one actually running
544 (see Exception.cmm).
545
546 Returns: non-zero if an exception was raised, zero otherwise.
547 -------------------------------------------------------------------------- */
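/* Hypothetical sketch of how the two entry points are meant to be used
 * (the real call sites are in the scheduler and in Exception.cmm):
 *
 *     // tso is about to leave a masked region on this capability:
 *     // raise the first pending exception, if any, respecting TSO_BLOCKEX
 *     if (maybePerformBlockedException(cap, tso)) {
 *         // one pending exception was raised; the rest stay queued
 *     }
 *
 *     // tso is the thread currently running, so don't raise anything
 *     // here; just wake up all the blocked senders:
 *     awakenBlockedExceptionQueue(cap, tso);
 */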
548
549 int
550 maybePerformBlockedException (Capability *cap, StgTSO *tso)
551 {
552 MessageThrowTo *msg;
553 const StgInfoTable *i;
554 StgTSO *source;
555
556 if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
557 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
558 awakenBlockedExceptionQueue(cap,tso);
559 return 1;
560 } else {
561 return 0;
562 }
563 }
564
565 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
566 (tso->flags & TSO_BLOCKEX) != 0) {
567 debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
568 }
569
570 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
571 && ((tso->flags & TSO_BLOCKEX) == 0
572 || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {
573
574 // We unblock just the first thread on the queue, and perform
575 // its throw immediately.
576 loop:
577 msg = tso->blocked_exceptions;
578 if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
579 i = lockClosure((StgClosure*)msg);
580 tso->blocked_exceptions = (MessageThrowTo*)msg->link;
581 if (i == &stg_MSG_NULL_info) {
582 unlockClosure((StgClosure*)msg,i);
583 goto loop;
584 }
585
586 throwToSingleThreaded(cap, msg->target, msg->exception);
587 source = msg->source;
588 doneWithMsgThrowTo(msg);
589 tryWakeupThread(cap, source);
590 return 1;
591 }
592 return 0;
593 }
594
595 // awakenBlockedExceptionQueue(): Just wake up the whole queue of
596 // blocked exceptions.
597
598 void
599 awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
600 {
601 MessageThrowTo *msg;
602 const StgInfoTable *i;
603 StgTSO *source;
604
605 for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
606 msg = (MessageThrowTo*)msg->link) {
607 i = lockClosure((StgClosure *)msg);
608 if (i != &stg_MSG_NULL_info) {
609 source = msg->source;
610 doneWithMsgThrowTo(msg);
611 tryWakeupThread(cap, source);
612 } else {
613 unlockClosure((StgClosure *)msg,i);
614 }
615 }
616 tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
617 }
618
619 /* -----------------------------------------------------------------------------
620 Remove a thread from blocking queues.
621
622 This is for use when we raise an exception in another thread, which
623 may be blocked.
624
625 Precondition: we have exclusive access to the TSO, via the same set
626 of conditions as throwToSingleThreaded() (c.f.).
627 -------------------------------------------------------------------------- */
628
629 static void
630 removeFromMVarBlockedQueue (StgTSO *tso)
631 {
632 StgMVar *mvar = (StgMVar*)tso->block_info.closure;
633 StgMVarTSOQueue *q = (StgMVarTSOQueue*)tso->_link;
634
635 if (q == (StgMVarTSOQueue*)END_TSO_QUEUE) {
636 // already removed from this MVar
637 return;
638 }
639
640 // Assume the MVar is locked. (not assertable; sometimes it isn't
641 // actually WHITEHOLE'd).
642
643 // We want to remove the MVAR_TSO_QUEUE object from the queue. It
644 // isn't doubly-linked so we can't actually remove it; instead we
645 // just overwrite it with an IND if possible and let the GC short
646 // it out. However, we have to be careful to maintain the deque
647 // structure:
648
649 if (mvar->head == q) {
650 mvar->head = q->link;
651 OVERWRITE_INFO(q, &stg_IND_info);
652 if (mvar->tail == q) {
653 mvar->tail = (StgMVarTSOQueue*)END_TSO_QUEUE;
654 }
655 }
656 else if (mvar->tail == q) {
657 // we can't replace it with an IND in this case, because then
658 // we lose the tail pointer when the GC shorts out the IND.
659 // So we use MSG_NULL as a kind of non-dupable indirection;
660 // these are ignored by takeMVar/putMVar.
661 OVERWRITE_INFO(q, &stg_MSG_NULL_info);
662 }
663 else {
664 OVERWRITE_INFO(q, &stg_IND_info);
665 }
666
667 // revoke the MVar operation
668 tso->_link = END_TSO_QUEUE;
669 }
670
671 static void
672 removeFromQueues(Capability *cap, StgTSO *tso)
673 {
674 switch (tso->why_blocked) {
675
676 case NotBlocked:
677 case ThreadMigrating:
678 return;
679
680 case BlockedOnSTM:
681 // Be careful: nothing to do here! We tell the scheduler that the
682 // thread is runnable and we leave it to the stack-walking code to
683 // abort the transaction while unwinding the stack. We should
684 // perhaps have a debugging test to make sure that this really
685 // happens and that the 'zombie' transaction does not get
686 // committed.
687 goto done;
688
689 case BlockedOnMVar:
690 case BlockedOnMVarRead:
691 removeFromMVarBlockedQueue(tso);
692 goto done;
693
694 case BlockedOnBlackHole:
695 // nothing to do
696 goto done;
697
698 case BlockedOnMsgThrowTo:
699 {
700 MessageThrowTo *m = tso->block_info.throwto;
701 // The message is locked by us, unless we got here via
702 // deleteAllThreads(), in which case we own all the
703 // capabilities.
704 // ASSERT(m->header.info == &stg_WHITEHOLE_info);
705
706 // unlock and revoke it at the same time
707 doneWithMsgThrowTo(m);
708 break;
709 }
710
711 #if !defined(THREADED_RTS)
712 case BlockedOnRead:
713 case BlockedOnWrite:
714 #if defined(mingw32_HOST_OS)
715 case BlockedOnDoProc:
716 #endif
717 removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
718 #if defined(mingw32_HOST_OS)
719 /* (Cooperatively) signal that the worker thread should abort
720 * the request.
721 */
722 abandonWorkRequest(tso->block_info.async_result->reqID);
723 #endif
724 goto done;
725
726 case BlockedOnDelay:
727 removeThreadFromQueue(cap, &sleeping_queue, tso);
728 goto done;
729 #endif
730
731 default:
732 barf("removeFromQueues: %d", tso->why_blocked);
733 }
734
735 done:
736 tso->why_blocked = NotBlocked;
737 appendToRunQueue(cap, tso);
738 }
739
740 /* -----------------------------------------------------------------------------
741 * raiseAsync()
742 *
743 * The following function implements the magic for raising an
744 * asynchronous exception in an existing thread.
745 *
746 * We first remove the thread from any queue on which it might be
747 * blocked. The possible blockages are MVARs, BLOCKING_QUEUESs, and
748 * TSO blocked_exception queues.
749 *
750 * We strip the stack down to the innermost CATCH_FRAME, building
751 * thunks in the heap for all the active computations, so they can
752 * be restarted if necessary. When we reach a CATCH_FRAME, we build
753 * an application of the handler to the exception, and push it on
754 * the top of the stack.
755 *
756 * How exactly do we save all the active computations? We create an
757 * AP_STACK for every UpdateFrame on the stack. Entering one of these
758 * AP_STACKs pushes everything from the corresponding update frame
759 * upwards onto the stack. (Actually, it pushes everything up to the
760 * next update frame plus a pointer to the next AP_STACK object.
761 * Entering the next AP_STACK object pushes more onto the stack until we
762 * reach the last AP_STACK object - at which point the stack should look
763 * exactly as it did when we killed the TSO and we can continue
764 * execution by entering the closure on top of the stack.
765 *
766 * We can also kill a thread entirely - this happens if either (a) the
767 * exception passed to raiseAsync is NULL, or (b) there's no
768 * CATCH_FRAME on the stack. In either case, we strip the entire
769 * stack and replace the thread with a zombie.
770 *
771 * ToDo: in THREADED_RTS mode, this function is only safe if either
772 * (a) we hold all the Capabilities (eg. in GC, or if there is only
773 * one Capability), or (b) we own the Capability that the TSO is
774 * currently blocked on or on the run queue of.
775 *
776 * -------------------------------------------------------------------------- */
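/* A rough before/after picture of the stack (illustrative only; frame
 * layouts are simplified and the stack top is drawn first):
 *
 *      before raiseAsync               after raiseAsync
 *      ---------------------           ---------------------
 *      <frames F2>           <- Sp     stg_enter_info        <- Sp
 *      UPDATE_FRAME (thunk t2)         raise(exception)
 *      <frames F1>                     CATCH_FRAME (handler h)
 *      UPDATE_FRAME (thunk t1)         <older frames>
 *      CATCH_FRAME (handler h)
 *      <older frames>
 *
 * t2 is updated with an AP_STACK capturing F2, and t1 with an AP_STACK
 * capturing F1 plus a pointer to t2's AP_STACK, so entering t1 later
 * replays the whole suspended computation.  The thread itself restarts
 * by entering raise(exception), which hands the exception to handler h
 * via the CATCH_FRAME.
 */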
777
778 StgTSO *
779 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
780 bool stop_at_atomically, StgUpdateFrame *stop_here)
781 {
782 const StgRetInfoTable *info;
783 StgPtr sp, frame;
784 StgClosure *updatee;
785 uint32_t i;
786 StgStack *stack;
787
788 debugTraceCap(DEBUG_sched, cap,
789 "raising exception in thread %ld.", (long)tso->id);
790
791 #if defined(PROFILING)
792 /*
793 * Debugging tool: on raising an exception, show where we are.
794 * See also Exception.cmm:stg_raisezh.
795 * This wasn't done for asynchronous exceptions originally; see #1450
796 */
797 if (RtsFlags.ProfFlags.showCCSOnException && exception != NULL)
798 {
799 fprintCCS_stderr(tso->prof.cccs,exception,tso);
800 }
801 #endif
802 // ASSUMES: the thread is not already complete or dead
803 // Upper layers should deal with that.
804 ASSERT(tso->what_next != ThreadComplete &&
805 tso->what_next != ThreadKilled);
806
807 // only if we own this TSO (except that deleteThread() calls this)
808 ASSERT(tso->cap == cap);
809
810 stack = tso->stackobj;
811
812 // mark it dirty; we're about to change its stack.
813 dirty_TSO(cap, tso);
814 dirty_STACK(cap, stack);
815
816 sp = stack->sp;
817
818 if (stop_here != NULL) {
819 updatee = stop_here->updatee;
820 } else {
821 updatee = NULL;
822 }
823
824 // The stack freezing code assumes there's a closure pointer on
825 // the top of the stack, so we have to arrange that this is the case...
826 //
827 if (sp[0] == (W_)&stg_enter_info) {
828 sp++;
829 } else {
830 sp--;
831 sp[0] = (W_)&stg_dummy_ret_closure;
832 }
833
834 frame = sp + 1;
835 while (stop_here == NULL || frame < (StgPtr)stop_here) {
836
837 // 1. Let the top of the stack be the "current closure"
838 //
839 // 2. Walk up the stack until we find either an UPDATE_FRAME or a
840 // CATCH_FRAME.
841 //
842 // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
843 // current closure applied to the chunk of stack up to (but not
844 // including) the update frame. This closure becomes the "current
845 // closure". Go back to step 2.
846 //
847 // 4. If it's a CATCH_FRAME, then leave the exception handler on
848 // top of the stack applied to the exception.
849 //
850 // 5. If it's a STOP_FRAME, then kill the thread.
851 //
852 // 6. If it's an UNDERFLOW_FRAME, then continue with the next
853 // stack chunk.
854 //
855 // NB: if we pass an ATOMICALLY_FRAME then abort the associated
856 // transaction
857
858 info = get_ret_itbl((StgClosure *)frame);
859
860 switch (info->i.type) {
861
862 case UPDATE_FRAME:
863 {
864 StgAP_STACK * ap;
865 uint32_t words;
866
867 // First build an AP_STACK consisting of the stack chunk above the
868 // current update frame, with the top word on the stack as the
869 // fun field.
870 //
871 words = frame - sp - 1;
872 ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
873
874 ap->size = words;
875 ap->fun = (StgClosure *)sp[0];
876
877 sp++;
878 for(i=0; i < words; ++i) {
879 ap->payload[i] = (StgClosure *)*sp++;
880 }
881
882 SET_HDR(ap,&stg_AP_STACK_info,
883 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
884 TICK_ALLOC_UP_THK(WDS(words+1),0);
885
886 //IF_DEBUG(scheduler,
887 // debugBelch("sched: Updating ");
888 // printPtr((P_)((StgUpdateFrame *)frame)->updatee);
889 // debugBelch(" with ");
890 // printObj((StgClosure *)ap);
891 // );
892
893 if (((StgUpdateFrame *)frame)->updatee == updatee) {
894 // If this update frame points to the same closure as
895 // the update frame further down the stack
896 // (stop_here), then don't perform the update. We
897 // want to keep the blackhole in this case, so we can
898 // detect and report the loop (#2783).
899 ap = (StgAP_STACK*)updatee;
900 } else {
901 // Perform the update
902 // TODO: this may waste some work, if the thunk has
903 // already been updated by another thread.
904 updateThunk(cap, tso,
905 ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
906 }
907
908 sp += sizeofW(StgUpdateFrame) - 1;
909 sp[0] = (W_)ap; // push onto stack
910 frame = sp + 1;
911 continue; //no need to bump frame
912 }
913
914 case UNDERFLOW_FRAME:
915 {
916 StgAP_STACK * ap;
917 uint32_t words;
918
919 // First build an AP_STACK consisting of the stack chunk above the
920 // current update frame, with the top word on the stack as the
921 // fun field.
922 //
923 words = frame - sp - 1;
924 ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
925
926 ap->size = words;
927 ap->fun = (StgClosure *)sp[0];
928 sp++;
929 for(i=0; i < words; ++i) {
930 ap->payload[i] = (StgClosure *)*sp++;
931 }
932
933 SET_HDR(ap,&stg_AP_STACK_NOUPD_info,
934 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
935 TICK_ALLOC_SE_THK(WDS(words+1),0);
936
937 stack->sp = sp;
938 threadStackUnderflow(cap,tso);
939 stack = tso->stackobj;
940 sp = stack->sp;
941
942 sp--;
943 sp[0] = (W_)ap;
944 frame = sp + 1;
945 continue;
946 }
947
948 case STOP_FRAME:
949 {
950 // We've stripped the entire stack, the thread is now dead.
951 tso->what_next = ThreadKilled;
952 stack->sp = frame + sizeofW(StgStopFrame);
953 goto done;
954 }
955
956 case CATCH_FRAME:
957 // If we find a CATCH_FRAME, and we've got an exception to raise,
958 // then build the THUNK raise(exception), and leave it on
959 // top of the CATCH_FRAME ready to enter.
960 //
961 {
962 StgCatchFrame *cf = (StgCatchFrame *)frame;
963 StgThunk *raise;
964
965 if (exception == NULL) break;
966
967 // we've got an exception to raise, so let's pass it to the
968 // handler in this frame.
969 //
970 raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
971 TICK_ALLOC_SE_THK(WDS(1),0);
972 SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
973 raise->payload[0] = exception;
974
975 // throw away the stack from Sp up to the CATCH_FRAME.
976 //
977 sp = frame - 1;
978
979 /* Ensure that async exceptions are blocked now, so we don't get
980 * a surprise exception before we get around to executing the
981 * handler.
982 */
983 tso->flags |= TSO_BLOCKEX;
984 if ((cf->exceptions_blocked & TSO_INTERRUPTIBLE) == 0) {
985 tso->flags &= ~TSO_INTERRUPTIBLE;
986 } else {
987 tso->flags |= TSO_INTERRUPTIBLE;
988 }
989
990 /* Put the newly-built THUNK on top of the stack, ready to execute
991 * when the thread restarts.
992 */
993 sp[0] = (W_)raise;
994 sp[-1] = (W_)&stg_enter_info;
995 stack->sp = sp-1;
996 tso->what_next = ThreadRunGHC;
997 goto done;
998 }
999
1000 case ATOMICALLY_FRAME:
1001 if (stop_at_atomically) {
1002 ASSERT(tso->trec->enclosing_trec == NO_TREC);
1003 stmCondemnTransaction(cap, tso -> trec);
1004 stack->sp = frame - 2;
1005 // The ATOMICALLY_FRAME expects to be returned a
1006 // result from the transaction, which it stores in the
1007 // stack frame. Hence we arrange to return a dummy
1008 // result, so that the GC doesn't get upset (#3578).
1009 // Perhaps a better way would be to have a different
1010 // ATOMICALLY_FRAME instance for condemned
1011 // transactions, but I don't fully understand the
1012 // interaction with STM invariants.
1013 stack->sp[1] = (W_)&stg_NO_TREC_closure;
1014 stack->sp[0] = (W_)&stg_ret_p_info;
1015 tso->what_next = ThreadRunGHC;
1016 goto done;
1017 }
1018 else
1019 {
1020 // Freezing an STM transaction. Just aborting the
1021 // transaction would be wrong; this is what we used to
1022 // do, and it goes wrong if the ATOMICALLY_FRAME ever
1023 // gets back onto the stack again, which it will do if
1024 // the transaction is inside unsafePerformIO or
1025 // unsafeInterleaveIO and hence inside an UPDATE_FRAME.
1026 //
1027 // So we want to make it so that if the enclosing
1028 // computation is resumed, we will re-execute the
1029 // transaction. We therefore:
1030 //
1031 // 1. abort the current transaction
1032 // 2. replace the stack up to and including the
1033 // atomically frame with a closure representing
1034 // a call to "atomically x", where x is the code
1035 // of the transaction.
1036 // 3. continue stripping the stack
1037 //
1038 StgTRecHeader *trec = tso->trec;
1039 StgTRecHeader *outer = trec->enclosing_trec;
1040
1041 StgThunk *atomically;
1042 StgAtomicallyFrame *af = (StgAtomicallyFrame*)frame;
1043
1044 debugTraceCap(DEBUG_stm, cap,
1045 "raiseAsync: freezing atomically frame")
1046 stmAbortTransaction(cap, trec);
1047 stmFreeAbortedTRec(cap, trec);
1048 tso->trec = outer;
1049
1050 atomically = (StgThunk*)allocate(cap,sizeofW(StgThunk)+1);
1051 TICK_ALLOC_SE_THK(1,0);
1052 SET_HDR(atomically,&stg_atomically_info,af->header.prof.ccs);
1053 atomically->payload[0] = af->code;
1054
1055 // discard stack up to and including the ATOMICALLY_FRAME
1056 frame += sizeofW(StgAtomicallyFrame);
1057 sp = frame - 1;
1058
1059 // replace the ATOMICALLY_FRAME with call to atomically#
1060 sp[0] = (W_)atomically;
1061 continue;
1062 }
1063
1064 case CATCH_STM_FRAME:
1065 case CATCH_RETRY_FRAME:
1066 // CATCH frames within an atomically block: abort the
1067 // inner transaction and continue. Eventually we will
1068 // hit the outer transaction that will get frozen (see
1069 // above).
1070 //
1071 // In this case (unlike ordinary exceptions) we do not care
1072 // whether the transaction is valid or not because its
1073 // possible validity cannot have caused the exception
1074 // and will not be visible after the abort.
1075 {
1076 StgTRecHeader *trec = tso -> trec;
1077 StgTRecHeader *outer = trec -> enclosing_trec;
1078 debugTraceCap(DEBUG_stm, cap,
1079 "found atomically block delivering async exception");
1080 stmAbortTransaction(cap, trec);
1081 stmFreeAbortedTRec(cap, trec);
1082 tso -> trec = outer;
1083 break;
1084 }
1085
1086 default:
1087 break;
1088 }
1089
1090 // move on to the next stack frame
1091 frame += stack_frame_sizeW((StgClosure *)frame);
1092 }
1093
1094 done:
1095 IF_DEBUG(sanity, checkTSO(tso));
1096
1097 // wake it up
1098 if (tso->why_blocked != NotBlocked) {
1099 tso->why_blocked = NotBlocked;
1100 appendToRunQueue(cap,tso);
1101 }
1102
1103 return tso;
1104 }