rts: Add FALLTHROUGH macro
rts/RaiseAsync.c (ghc.git)
1 /* ---------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2006
4 *
5 * Asynchronous exceptions
6 *
7 * --------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11
12 #include "sm/Storage.h"
13 #include "Threads.h"
14 #include "Trace.h"
15 #include "RaiseAsync.h"
16 #include "Schedule.h"
17 #include "Updates.h"
18 #include "STM.h"
19 #include "sm/Sanity.h"
20 #include "Profiling.h"
21 #include "Messages.h"
22 #if defined(mingw32_HOST_OS)
23 #include "win32/IOManager.h"
24 #endif
25
26 static void blockedThrowTo (Capability *cap,
27 StgTSO *target, MessageThrowTo *msg);
28
29 static void removeFromQueues(Capability *cap, StgTSO *tso);
30
31 static void removeFromMVarBlockedQueue (StgTSO *tso);
32
33 static void throwToSendMsg (Capability *cap USED_IF_THREADS,
34 Capability *target_cap USED_IF_THREADS,
35 MessageThrowTo *msg USED_IF_THREADS);
36
37 /* -----------------------------------------------------------------------------
38 throwToSingleThreaded
39
40 This version of throwTo is safe to use if and only if one of the
41 following holds:
42
43 - !THREADED_RTS
44
45 - all the other threads in the system are stopped (eg. during GC).
46
47 - we surely own the target TSO (eg. we just took it from the
48 run queue of the current capability, or we are running it).
49
50 It doesn't cater for blocking the source thread until the exception
51 has been raised.
52 -------------------------------------------------------------------------- */
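/* For example (a sketch, not part of this file): during an RTS-wide
   shutdown, when all other threads are known to be stopped, a thread can be
   killed directly.  Passing NULL as the exception strips the entire stack
   and leaves a zombie (see raiseAsync() below).  The helper name here is
   hypothetical:

       static void deleteThreadNow (Capability *cap, StgTSO *tso)
       {
           // safe: every other thread is stopped, so we own 'tso'
           throwToSingleThreaded(cap, tso, NULL);   // NULL => just kill it
       }
 */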
53
54 static void
55 throwToSingleThreaded__ (Capability *cap, StgTSO *tso, StgClosure *exception,
56 bool stop_at_atomically, StgUpdateFrame *stop_here)
57 {
58 // Thread already dead?
59 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
60 return;
61 }
62
63 // Remove it from any blocking queues
64 removeFromQueues(cap,tso);
65
66 raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
67 }
68
69 void
70 throwToSingleThreaded (Capability *cap, StgTSO *tso, StgClosure *exception)
71 {
72 throwToSingleThreaded__(cap, tso, exception, false, NULL);
73 }
74
75 void
76 throwToSingleThreaded_ (Capability *cap, StgTSO *tso, StgClosure *exception,
77 bool stop_at_atomically)
78 {
79 throwToSingleThreaded__ (cap, tso, exception, stop_at_atomically, NULL);
80 }
81
82 void // cannot return a different TSO
83 suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
84 {
85 throwToSingleThreaded__ (cap, tso, NULL, false, stop_here);
86 }
87
88 /* -----------------------------------------------------------------------------
89 throwToSelf
90
91 Useful for throwing an async exception in a thread from the
92 runtime. It handles unlocking the throwto message returned by
93 throwTo().
94
95 Note [Throw to self when masked]
96
97 When a StackOverflow occurs when the thread is masked, we want to
98 defer the exception to when the thread becomes unmasked/hits an
99 interruptible point. We already have a mechanism for doing this,
100 the blocked_exceptions list, but the use here is a bit unusual,
101 because an exception is normally only added to this list upon
102 an asynchronous 'throwTo' call (with all of the relevant
103 multithreaded nonsense). Morally, a stack overflow should be an
104 asynchronous exception sent by a thread to itself, and it should
105 have the same semantics. But there are a few key differences:
106
107 - If you actually tried to send an asynchronous exception to
108 yourself using throwTo, the exception would be delivered
109 immediately. This is because throwTo itself is considered an
110 interruptible point, so the exception is always deliverable. Thus,
111 ordinarily, we never end up with a message to oneself in the
112 blocked_exceptions queue.
113
114 - In the case of a StackOverflow, we don't actually care about the
115 wakeup semantics; when an exception is delivered, the thread that
116 originally threw the exception should be woken up, since throwTo
117 blocks until the exception is successfully thrown. Fortunately,
118 it is harmless to wake up a thread that doesn't actually need waking
119 up, e.g. ourselves.
120
121 - No synchronization is necessary, because we own the TSO and the
122 capability. You can observe this by tracing through the execution
123 of throwTo. We skip synchronizing the message and inter-capability
124 communication.
125
126 We think this doesn't break any invariants, but do be careful!
127 -------------------------------------------------------------------------- */
128
129 void
130 throwToSelf (Capability *cap, StgTSO *tso, StgClosure *exception)
131 {
132 MessageThrowTo *m;
133
134 m = throwTo(cap, tso, tso, exception);
135
136 if (m != NULL) {
137 // throwTo leaves it locked
138 unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info);
139 }
140 }
141
142 /* -----------------------------------------------------------------------------
143 throwTo
144
145 This function may be used to throw an exception from one thread to
146 another, during the course of normal execution. This is a tricky
147 task: the target thread might be running on another CPU, or it
148 may be blocked and could be woken up at any point by another CPU.
149 We have some delicate synchronisation to do.
150
151 The underlying scheme when multiple Capabilities are in use is
152 message passing: when the target of a throwTo is on another
153 Capability, we send a message (a MessageThrowTo closure) to that
154 Capability.
155
156 If the throwTo needs to block because the target TSO is masking
157 exceptions (the TSO_BLOCKEX flag), then the message is placed on
158 the blocked_exceptions queue attached to the target TSO. When the
159 target TSO enters the unmasked state again, it must check the
160 queue. The blocked_exceptions queue is not locked; only the
161 Capability owning the TSO may modify it.
162
163 To make things simpler for throwTo, we always create the message
164 first before deciding what to do. The message may get sent, or it
165 may get attached to a TSO's blocked_exceptions queue, or the
166 exception may get thrown immediately and the message dropped,
167 depending on the current state of the target.
168
169 Currently we send a message if the target belongs to another
170 Capability, and it is
171
172 - NotBlocked, BlockedOnMsgThrowTo,
173 BlockedOnCCall_Interruptible
174
175 - or it is masking exceptions (TSO_BLOCKEX)
176
177 Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
178 BlockedOnBlackHole then we acquire ownership of the TSO by locking
179 its parent container (e.g. the MVar) and then raise the exception.
180 We might change these cases to be more message-passing-like in the
181 future.
182
183 Returns:
184
185 NULL exception was raised, ok to continue
186
187 MessageThrowTo * exception was not raised; the source TSO
188 should now put itself in the state
189 BlockedOnMsgThrowTo, and when it is ready
190 it should unlock the message using
191 unlockClosure(msg, &stg_MSG_THROWTO_info);
192 If it decides not to raise the exception after
193 all, it can revoke it safely with
194 unlockClosure(msg, &stg_MSG_NULL_info);
195
196 -------------------------------------------------------------------------- */
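/* For example (a sketch, not part of this file): a caller running on the
   source thread's Capability could use the result like this.  The variable
   names are illustrative; the real callers live in the scheduler and in
   Exception.cmm.

       MessageThrowTo *m = throwTo(cap, source_tso, target_tso, exception);
       if (m == NULL) {
           // the exception has already been raised in the target
       } else {
           // the exception is pending: record that we are blocked on the
           // message, then unlock it once our own state is tidied up ...
           source_tso->why_blocked = BlockedOnMsgThrowTo;
           source_tso->block_info.throwto = m;
           unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info);
           // ... or, to give up on the throwTo instead, revoke it:
           //     unlockClosure((StgClosure*)m, &stg_MSG_NULL_info);
       }
 */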
197
198 MessageThrowTo *
199 throwTo (Capability *cap, // the Capability we hold
200 StgTSO *source, // the TSO sending the exception (or NULL)
201 StgTSO *target, // the TSO receiving the exception
202 StgClosure *exception) // the exception closure
203 {
204 MessageThrowTo *msg;
205
206 msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
207 // the message starts locked; see below
208 SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
209 msg->source = source;
210 msg->target = target;
211 msg->exception = exception;
212
213 switch (throwToMsg(cap, msg))
214 {
215 case THROWTO_SUCCESS:
216 // unlock the message now, otherwise we leave a WHITEHOLE in
217 // the heap (#6103)
218 SET_HDR(msg, &stg_MSG_THROWTO_info, CCS_SYSTEM);
219 return NULL;
220
221 case THROWTO_BLOCKED:
222 default:
223 // the caller will unlock the message when it is ready. We
224 // cannot unlock it yet, because the calling thread will need
225 // to tidy up its state first.
226 return msg;
227 }
228 }
229
230
231 uint32_t
232 throwToMsg (Capability *cap, MessageThrowTo *msg)
233 {
234 StgWord status;
235 StgTSO *target = msg->target;
236 Capability *target_cap;
237
238 goto check_target;
239
240 retry:
241 write_barrier();
242 debugTrace(DEBUG_sched, "throwTo: retrying...");
243
244 check_target:
245 ASSERT(target != END_TSO_QUEUE);
246
247 // Thread already dead?
248 if (target->what_next == ThreadComplete
249 || target->what_next == ThreadKilled) {
250 return THROWTO_SUCCESS;
251 }
252
253 debugTraceCap(DEBUG_sched, cap,
254 "throwTo: from thread %lu to thread %lu",
255 (unsigned long)msg->source->id,
256 (unsigned long)msg->target->id);
257
258 #if defined(DEBUG)
259 traceThreadStatus(DEBUG_sched, target);
260 #endif
261
262 target_cap = target->cap;
263 if (target->cap != cap) {
264 throwToSendMsg(cap, target_cap, msg);
265 return THROWTO_BLOCKED;
266 }
267
268 status = target->why_blocked;
269
270 switch (status) {
271 case NotBlocked:
272 {
273 if ((target->flags & TSO_BLOCKEX) == 0) {
274 // It's on our run queue and not blocking exceptions
275 raiseAsync(cap, target, msg->exception, false, NULL);
276 return THROWTO_SUCCESS;
277 } else {
278 blockedThrowTo(cap,target,msg);
279 return THROWTO_BLOCKED;
280 }
281 }
282
283 case BlockedOnMsgThrowTo:
284 {
285 const StgInfoTable *i;
286 MessageThrowTo *m;
287
288 m = target->block_info.throwto;
289
290 // target is local to this cap, but has sent a throwto
291 // message to another cap.
292 //
293 // The source message is locked. We need to revoke the
294 // target's message so that we can raise the exception, so
295 // we attempt to lock it.
296
297 // There's a possibility of a deadlock if two threads are both
298 // trying to throwTo each other (or more generally, a cycle of
299 // threads). To break the symmetry we compare the addresses
300 // of the MessageThrowTo objects, and the one for which m <
301 // msg gets to spin, while the other can only try to lock
302 // once, but must then back off and unlock both before trying
303 // again.
304 if (m < msg) {
305 i = lockClosure((StgClosure *)m);
306 } else {
307 i = tryLockClosure((StgClosure *)m);
308 if (i == NULL) {
309 // debugBelch("collision\n");
310 throwToSendMsg(cap, target->cap, msg);
311 return THROWTO_BLOCKED;
312 }
313 }
314
315 if (i == &stg_MSG_NULL_info) {
316 // we know there's a MSG_TRY_WAKEUP on the way, so we
317 // might as well just do it now. The message will
318 // be a no-op when it arrives.
319 unlockClosure((StgClosure*)m, i);
320 tryWakeupThread(cap, target);
321 goto retry;
322 }
323
324 if (i != &stg_MSG_THROWTO_info) {
325 // not a MSG_THROWTO any more: the message has changed under us, so retry
326 unlockClosure((StgClosure*)m, i);
327 goto retry;
328 }
329
330 if ((target->flags & TSO_BLOCKEX) &&
331 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
332 unlockClosure((StgClosure*)m, i);
333 blockedThrowTo(cap,target,msg);
334 return THROWTO_BLOCKED;
335 }
336
337 // nobody else can wake up this TSO after we claim the message
338 doneWithMsgThrowTo(m);
339
340 raiseAsync(cap, target, msg->exception, false, NULL);
341 return THROWTO_SUCCESS;
342 }
343
344 case BlockedOnMVar:
345 case BlockedOnMVarRead:
346 {
347 /*
348 To establish ownership of this TSO, we need to acquire a
349 lock on the MVar that it is blocked on.
350 */
351 StgMVar *mvar;
352 StgInfoTable *info USED_IF_THREADS;
353
354 mvar = (StgMVar *)target->block_info.closure;
355
356 // ASSUMPTION: tso->block_info must always point to a
357 // closure. In the threaded RTS it does.
358 switch (get_itbl((StgClosure *)mvar)->type) {
359 case MVAR_CLEAN:
360 case MVAR_DIRTY:
361 break;
362 default:
363 goto retry;
364 }
365
366 info = lockClosure((StgClosure *)mvar);
367
368 // we have the MVar, let's check whether the thread
369 // is still blocked on the same MVar.
370 if ((target->why_blocked != BlockedOnMVar && target->why_blocked != BlockedOnMVarRead)
371 || (StgMVar *)target->block_info.closure != mvar) {
372 unlockClosure((StgClosure *)mvar, info);
373 goto retry;
374 }
375
376 if (target->_link == END_TSO_QUEUE) {
377 // the MVar operation has already completed. There is a
378 // MSG_TRY_WAKEUP on the way, but we can just wake up the
379 // thread now anyway and ignore the message when it
380 // arrives.
381 unlockClosure((StgClosure *)mvar, info);
382 tryWakeupThread(cap, target);
383 goto retry;
384 }
385
386 if ((target->flags & TSO_BLOCKEX) &&
387 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
388 blockedThrowTo(cap,target,msg);
389 unlockClosure((StgClosure *)mvar, info);
390 return THROWTO_BLOCKED;
391 } else {
392 // revoke the MVar operation
393 removeFromMVarBlockedQueue(target);
394 raiseAsync(cap, target, msg->exception, false, NULL);
395 unlockClosure((StgClosure *)mvar, info);
396 return THROWTO_SUCCESS;
397 }
398 }
399
400 case BlockedOnBlackHole:
401 {
402 if (target->flags & TSO_BLOCKEX) {
403 // BlockedOnBlackHole is not interruptible.
404 blockedThrowTo(cap,target,msg);
405 return THROWTO_BLOCKED;
406 } else {
407 // Revoke the message by replacing it with IND. We're not
408 // locking anything here, so we might still get a TRY_WAKEUP
409 // message from the owner of the blackhole some time in the
410 // future, but that doesn't matter.
411 ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
412 OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
413 raiseAsync(cap, target, msg->exception, false, NULL);
414 return THROWTO_SUCCESS;
415 }
416 }
417
418 case BlockedOnSTM:
419 if ((target->flags & TSO_BLOCKEX) &&
420 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
421 blockedThrowTo(cap,target,msg);
422 return THROWTO_BLOCKED;
423 } else {
424 raiseAsync(cap, target, msg->exception, false, NULL);
425 return THROWTO_SUCCESS;
426 }
427
428 case BlockedOnCCall_Interruptible:
429 #if defined(THREADED_RTS)
430 {
431 Task *task = NULL;
432 // walk suspended_ccalls to find the correct worker thread
433 InCall *incall;
434 for (incall = cap->suspended_ccalls; incall != NULL; incall = incall->next) {
435 if (incall->suspended_tso == target) {
436 task = incall->task;
437 break;
438 }
439 }
440 if (task != NULL) {
441 blockedThrowTo(cap, target, msg);
442 if (!((target->flags & TSO_BLOCKEX) &&
443 ((target->flags & TSO_INTERRUPTIBLE) == 0))) {
444 interruptWorkerTask(task);
445 }
446 return THROWTO_BLOCKED;
447 } else {
448 debugTraceCap(DEBUG_sched, cap, "throwTo: could not find worker thread to kill");
449 }
450 // fall through to the BlockedOnCCall case below
451 }
452 FALLTHROUGH;
453 #endif
454 case BlockedOnCCall:
455 blockedThrowTo(cap,target,msg);
456 return THROWTO_BLOCKED;
457
458 #if !defined(THREADED_RTS)
459 case BlockedOnRead:
460 case BlockedOnWrite:
461 case BlockedOnDelay:
462 #if defined(mingw32_HOST_OS)
463 case BlockedOnDoProc:
464 #endif
465 if ((target->flags & TSO_BLOCKEX) &&
466 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
467 blockedThrowTo(cap,target,msg);
468 return THROWTO_BLOCKED;
469 } else {
470 removeFromQueues(cap,target);
471 raiseAsync(cap, target, msg->exception, false, NULL);
472 return THROWTO_SUCCESS;
473 }
474 #endif
475
476 case ThreadMigrating:
477 // if it is ThreadMigrating and tso->cap is ours, then it
478 // *must* be migrating *to* this capability. If it were
479 // migrating away from the capability, then tso->cap would
480 // point to the destination.
481 //
482 // There is a MSG_WAKEUP in the message queue for this thread,
483 // but we can just do it preemptively:
484 tryWakeupThread(cap, target);
485 // and now retry, the thread should be runnable.
486 goto retry;
487
488 default:
489 barf("throwTo: unrecognised why_blocked (%d)", target->why_blocked);
490 }
491 barf("throwTo");
492 }
493
494 static void
495 throwToSendMsg (Capability *cap STG_UNUSED,
496 Capability *target_cap USED_IF_THREADS,
497 MessageThrowTo *msg USED_IF_THREADS)
498
499 {
500 #if defined(THREADED_RTS)
501 debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);
502
503 sendMessage(cap, target_cap, (Message*)msg);
504 #endif
505 }
506
507 // Block a throwTo message on the target TSO's blocked_exceptions
508 // queue. The current Capability must own the target TSO in order to
509 // modify the blocked_exceptions queue.
510 void
511 blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
512 {
513 debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
514 (unsigned long)target->id);
515
516 ASSERT(target->cap == cap);
517
518 msg->link = target->blocked_exceptions;
519 target->blocked_exceptions = msg;
520 dirty_TSO(cap,target); // we modified the blocked_exceptions queue
521 }
522
523 /* -----------------------------------------------------------------------------
524 Waking up threads blocked in throwTo
525
526 There are two ways to do this: maybePerformBlockedException() will
527 perform the throwTo() for the thread at the head of the queue
528 immediately, and leave the other threads on the queue.
529 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
530 before raising an exception.
531
532 awakenBlockedExceptionQueue() will wake up all the threads in the
533 queue, but not perform any throwTo() immediately. This might be
534 more appropriate when the target thread is the one actually running
535 (see Exception.cmm).
536
537 Returns: non-zero if an exception was raised, zero otherwise.
538 -------------------------------------------------------------------------- */
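/* For example (a sketch, not part of this file): a caller that has just
   unmasked exceptions for 'tso' (the scheduler, or the unmask code in
   Exception.cmm) might do:

       if (maybePerformBlockedException(cap, tso)) {
           // an exception was raised in 'tso'; its stack has already been
           // rewritten, so simply let the scheduler run it again
       }

   whereas code running as the target thread itself would instead call
   awakenBlockedExceptionQueue(cap, tso) to release all of the blocked
   throwTo callers without raising anything here.
 */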
539
540 int
541 maybePerformBlockedException (Capability *cap, StgTSO *tso)
542 {
543 MessageThrowTo *msg;
544 const StgInfoTable *i;
545 StgTSO *source;
546
547 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
548 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
549 awakenBlockedExceptionQueue(cap,tso);
550 return 1;
551 } else {
552 return 0;
553 }
554 }
555
556 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
557 (tso->flags & TSO_BLOCKEX) != 0) {
558 debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
559 }
560
561 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
562 && ((tso->flags & TSO_BLOCKEX) == 0
563 || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {
564
565 // We unblock just the first thread on the queue, and perform
566 // its throw immediately.
567 loop:
568 msg = tso->blocked_exceptions;
569 if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
570 i = lockClosure((StgClosure*)msg);
571 tso->blocked_exceptions = (MessageThrowTo*)msg->link;
572 if (i == &stg_MSG_NULL_info) {
573 unlockClosure((StgClosure*)msg,i);
574 goto loop;
575 }
576
577 throwToSingleThreaded(cap, msg->target, msg->exception);
578 source = msg->source;
579 doneWithMsgThrowTo(msg);
580 tryWakeupThread(cap, source);
581 return 1;
582 }
583 return 0;
584 }
585
586 // awakenBlockedExceptionQueue(): Just wake up the whole queue of
587 // blocked exceptions.
588
589 void
590 awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
591 {
592 MessageThrowTo *msg;
593 const StgInfoTable *i;
594 StgTSO *source;
595
596 for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
597 msg = (MessageThrowTo*)msg->link) {
598 i = lockClosure((StgClosure *)msg);
599 if (i != &stg_MSG_NULL_info) {
600 source = msg->source;
601 doneWithMsgThrowTo(msg);
602 tryWakeupThread(cap, source);
603 } else {
604 unlockClosure((StgClosure *)msg,i);
605 }
606 }
607 tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
608 }
609
610 /* -----------------------------------------------------------------------------
611 Remove a thread from blocking queues.
612
613 This is for use when we raise an exception in another thread, which
614 may be blocked.
615
616 Precondition: we have exclusive access to the TSO, via the same set
617 of conditions as throwToSingleThreaded() (c.f.).
618 -------------------------------------------------------------------------- */
619
620 static void
621 removeFromMVarBlockedQueue (StgTSO *tso)
622 {
623 StgMVar *mvar = (StgMVar*)tso->block_info.closure;
624 StgMVarTSOQueue *q = (StgMVarTSOQueue*)tso->_link;
625
626 if (q == (StgMVarTSOQueue*)END_TSO_QUEUE) {
627 // already removed from this MVar
628 return;
629 }
630
631 // Assume the MVar is locked. (not assertable; sometimes it isn't
632 // actually WHITEHOLE'd).
633
634 // We want to remove the MVAR_TSO_QUEUE object from the queue. It
635 // isn't doubly-linked so we can't actually remove it; instead we
636 // just overwrite it with an IND if possible and let the GC short
637 // it out. However, we have to be careful to maintain the deque
638 // structure:
639
640 if (mvar->head == q) {
641 mvar->head = q->link;
642 OVERWRITE_INFO(q, &stg_IND_info);
643 if (mvar->tail == q) {
644 mvar->tail = (StgMVarTSOQueue*)END_TSO_QUEUE;
645 }
646 }
647 else if (mvar->tail == q) {
648 // we can't replace it with an IND in this case, because then
649 // we lose the tail pointer when the GC shorts out the IND.
650 // So we use MSG_NULL as a kind of non-dupable indirection;
651 // these are ignored by takeMVar/putMVar.
652 OVERWRITE_INFO(q, &stg_MSG_NULL_info);
653 }
654 else {
655 OVERWRITE_INFO(q, &stg_IND_info);
656 }
657
658 // revoke the MVar operation
659 tso->_link = END_TSO_QUEUE;
660 }
661
662 static void
663 removeFromQueues(Capability *cap, StgTSO *tso)
664 {
665 switch (tso->why_blocked) {
666
667 case NotBlocked:
668 case ThreadMigrating:
669 return;
670
671 case BlockedOnSTM:
672 // Be careful: nothing to do here! We tell the scheduler that the
673 // thread is runnable and we leave it to the stack-walking code to
674 // abort the transaction while unwinding the stack. We should
675 // perhaps have a debugging test to make sure that this really
676 // happens and that the 'zombie' transaction does not get
677 // committed.
678 goto done;
679
680 case BlockedOnMVar:
681 case BlockedOnMVarRead:
682 removeFromMVarBlockedQueue(tso);
683 goto done;
684
685 case BlockedOnBlackHole:
686 // nothing to do
687 goto done;
688
689 case BlockedOnMsgThrowTo:
690 {
691 MessageThrowTo *m = tso->block_info.throwto;
692 // The message is locked by us, unless we got here via
693 // deleteAllThreads(), in which case we own all the
694 // capabilities.
695 // ASSERT(m->header.info == &stg_WHITEHOLE_info);
696
697 // unlock and revoke it at the same time
698 doneWithMsgThrowTo(m);
699 break;
700 }
701
702 #if !defined(THREADED_RTS)
703 case BlockedOnRead:
704 case BlockedOnWrite:
705 #if defined(mingw32_HOST_OS)
706 case BlockedOnDoProc:
707 #endif
708 removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
709 #if defined(mingw32_HOST_OS)
710 /* (Cooperatively) signal that the worker thread should abort
711 * the request.
712 */
713 abandonWorkRequest(tso->block_info.async_result->reqID);
714 #endif
715 goto done;
716
717 case BlockedOnDelay:
718 removeThreadFromQueue(cap, &sleeping_queue, tso);
719 goto done;
720 #endif
721
722 default:
723 barf("removeFromQueues: %d", tso->why_blocked);
724 }
725
726 done:
727 tso->why_blocked = NotBlocked;
728 appendToRunQueue(cap, tso);
729 }
730
731 /* -----------------------------------------------------------------------------
732 * raiseAsync()
733 *
734 * The following function implements the magic for raising an
735 * asynchronous exception in an existing thread.
736 *
737 * We first remove the thread from any queue on which it might be
738 * blocked. The possible blockages are MVARs, BLOCKING_QUEUESs, and
739 * TSO blocked_exception queues.
740 *
741 * We strip the stack down to the innermost CATCH_FRAME, building
742 * thunks in the heap for all the active computations, so they can
743 * be restarted if necessary. When we reach a CATCH_FRAME, we build
744 * an application of the handler to the exception, and push it on
745 * the top of the stack.
746 *
747 * How exactly do we save all the active computations? We create an
748 * AP_STACK for every UpdateFrame on the stack. Entering one of these
749 * AP_STACKs pushes everything from the corresponding update frame
750 * upwards onto the stack. (Actually, it pushes everything up to the
751 * next update frame plus a pointer to the next AP_STACK object.
752 * Entering the next AP_STACK object pushes more onto the stack until we
753 * reach the last AP_STACK object - at which point the stack should look
754 * exactly as it did when we killed the TSO and we can continue
755 * execution by entering the closure on top of the stack.)
756 *
757 * We can also kill a thread entirely - this happens if either (a) the
758 * exception passed to raiseAsync is NULL, or (b) there's no
759 * CATCH_FRAME on the stack. In either case, we strip the entire
760 * stack and replace the thread with a zombie.
761 *
762 * ToDo: in THREADED_RTS mode, this function is only safe if either
763 * (a) we hold all the Capabilities (eg. in GC, or if there is only
764 * one Capability), or (b) we own the Capability that the TSO is
765 * currently blocked on or on the run queue of.
766 *
767 * -------------------------------------------------------------------------- */
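/* For example (a sketch, not part of this file): suspendComputation() above
   uses the stop_here argument to strip the stack only down to a particular
   update frame, turning the computation in between into AP_STACK thunks
   without killing the thread:

       // 'frame' is an StgUpdateFrame* somewhere on tso's stack
       suspendComputation(cap, tso, frame);
       // roughly: removeFromQueues(cap, tso) followed by
       //          raiseAsync(cap, tso, NULL, false, frame)
 */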
768
769 StgTSO *
770 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
771 bool stop_at_atomically, StgUpdateFrame *stop_here)
772 {
773 const StgRetInfoTable *info;
774 StgPtr sp, frame;
775 StgClosure *updatee;
776 uint32_t i;
777 StgStack *stack;
778
779 debugTraceCap(DEBUG_sched, cap,
780 "raising exception in thread %ld.", (long)tso->id);
781
782 #if defined(PROFILING)
783 /*
784 * Debugging tool: on raising an exception, show where we are.
785 * See also Exception.cmm:stg_raisezh.
786 * This wasn't done for asynchronous exceptions originally; see #1450
787 */
788 if (RtsFlags.ProfFlags.showCCSOnException && exception != NULL)
789 {
790 fprintCCS_stderr(tso->prof.cccs,exception,tso);
791 }
792 #endif
793 // ASSUMES: the thread is not already complete or dead
794 // Upper layers should deal with that.
795 ASSERT(tso->what_next != ThreadComplete &&
796 tso->what_next != ThreadKilled);
797
798 // only if we own this TSO (except that deleteThread() calls this when we hold all the capabilities)
799 ASSERT(tso->cap == cap);
800
801 stack = tso->stackobj;
802
803 // mark it dirty; we're about to change its stack.
804 dirty_TSO(cap, tso);
805 dirty_STACK(cap, stack);
806
807 sp = stack->sp;
808
809 if (stop_here != NULL) {
810 updatee = stop_here->updatee;
811 } else {
812 updatee = NULL;
813 }
814
815 // The stack freezing code assumes there's a closure pointer on
816 // the top of the stack, so we have to arrange that this is the case...
817 //
818 if (sp[0] == (W_)&stg_enter_info) {
819 sp++;
820 } else {
821 sp--;
822 sp[0] = (W_)&stg_dummy_ret_closure;
823 }
824
825 frame = sp + 1;
826 while (stop_here == NULL || frame < (StgPtr)stop_here) {
827
828 // 1. Let the top of the stack be the "current closure"
829 //
830 // 2. Walk up the stack until we find either an UPDATE_FRAME or a
831 // CATCH_FRAME.
832 //
833 // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
834 // current closure applied to the chunk of stack up to (but not
835 // including) the update frame. This closure becomes the "current
836 // closure". Go back to step 2.
837 //
838 // 4. If it's a CATCH_FRAME, then leave the exception handler on
839 // top of the stack applied to the exception.
840 //
841 // 5. If it's a STOP_FRAME, then kill the thread.
842 //
843 // 6. If it's an UNDERFLOW_FRAME, then continue with the next
844 // stack chunk.
845 //
846 // NB: if we pass an ATOMICALLY_FRAME then abort the associated
847 // transaction
848
849 info = get_ret_itbl((StgClosure *)frame);
850
851 switch (info->i.type) {
852
853 case UPDATE_FRAME:
854 {
855 StgAP_STACK * ap;
856 uint32_t words;
857
858 // First build an AP_STACK consisting of the stack chunk above the
859 // current update frame, with the top word on the stack as the
860 // fun field.
861 //
862 words = frame - sp - 1;
863 ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
864
865 ap->size = words;
866 ap->fun = (StgClosure *)sp[0];
867
868 sp++;
869 for(i=0; i < words; ++i) {
870 ap->payload[i] = (StgClosure *)*sp++;
871 }
872
873 SET_HDR(ap,&stg_AP_STACK_info,
874 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
875 TICK_ALLOC_UP_THK(WDS(words+1),0);
876
877 //IF_DEBUG(scheduler,
878 // debugBelch("sched: Updating ");
879 // printPtr((P_)((StgUpdateFrame *)frame)->updatee);
880 // debugBelch(" with ");
881 // printObj((StgClosure *)ap);
882 // );
883
884 if (((StgUpdateFrame *)frame)->updatee == updatee) {
885 // If this update frame points to the same closure as
886 // the update frame further down the stack
887 // (stop_here), then don't perform the update. We
888 // want to keep the blackhole in this case, so we can
889 // detect and report the loop (#2783).
890 ap = (StgAP_STACK*)updatee;
891 } else {
892 // Perform the update
893 // TODO: this may waste some work, if the thunk has
894 // already been updated by another thread.
895 updateThunk(cap, tso,
896 ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
897 }
898
899 sp += sizeofW(StgUpdateFrame) - 1;
900 sp[0] = (W_)ap; // push onto stack
901 frame = sp + 1;
902 continue; //no need to bump frame
903 }
904
905 case UNDERFLOW_FRAME:
906 {
907 StgAP_STACK * ap;
908 uint32_t words;
909
910 // First build an AP_STACK consisting of the stack chunk above the
911 // current update frame, with the top word on the stack as the
912 // fun field.
913 //
914 words = frame - sp - 1;
915 ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
916
917 ap->size = words;
918 ap->fun = (StgClosure *)sp[0];
919 sp++;
920 for(i=0; i < words; ++i) {
921 ap->payload[i] = (StgClosure *)*sp++;
922 }
923
924 SET_HDR(ap,&stg_AP_STACK_NOUPD_info,
925 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
926 TICK_ALLOC_SE_THK(WDS(words+1),0);
927
928 stack->sp = sp;
929 threadStackUnderflow(cap,tso);
930 stack = tso->stackobj;
931 sp = stack->sp;
932
933 sp--;
934 sp[0] = (W_)ap;
935 frame = sp + 1;
936 continue;
937 }
938
939 case STOP_FRAME:
940 {
941 // We've stripped the entire stack, the thread is now dead.
942 tso->what_next = ThreadKilled;
943 stack->sp = frame + sizeofW(StgStopFrame);
944 goto done;
945 }
946
947 case CATCH_FRAME:
948 // If we find a CATCH_FRAME, and we've got an exception to raise,
949 // then build the THUNK raise(exception), and leave it on
950 // top of the CATCH_FRAME ready to enter.
951 //
952 {
953 StgCatchFrame *cf = (StgCatchFrame *)frame;
954 StgThunk *raise;
955
956 if (exception == NULL) break;
957
958 // we've got an exception to raise, so let's pass it to the
959 // handler in this frame.
960 //
961 raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
962 TICK_ALLOC_SE_THK(WDS(1),0);
963 SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
964 raise->payload[0] = exception;
965
966 // throw away the stack from Sp up to the CATCH_FRAME.
967 //
968 sp = frame - 1;
969
970 /* Ensure that async exceptions are blocked now, so we don't get
971 * a surprise exception before we get around to executing the
972 * handler.
973 */
974 tso->flags |= TSO_BLOCKEX;
975 if ((cf->exceptions_blocked & TSO_INTERRUPTIBLE) == 0) {
976 tso->flags &= ~TSO_INTERRUPTIBLE;
977 } else {
978 tso->flags |= TSO_INTERRUPTIBLE;
979 }
980
981 /* Put the newly-built THUNK on top of the stack, ready to execute
982 * when the thread restarts.
983 */
984 sp[0] = (W_)raise;
985 sp[-1] = (W_)&stg_enter_info;
986 stack->sp = sp-1;
987 tso->what_next = ThreadRunGHC;
988 goto done;
989 }
990
991 case ATOMICALLY_FRAME:
992 if (stop_at_atomically) {
993 ASSERT(tso->trec->enclosing_trec == NO_TREC);
994 stmCondemnTransaction(cap, tso -> trec);
995 stack->sp = frame - 2;
996 // The ATOMICALLY_FRAME expects to be returned a
997 // result from the transaction, which it stores in the
998 // stack frame. Hence we arrange to return a dummy
999 // result, so that the GC doesn't get upset (#3578).
1000 // Perhaps a better way would be to have a different
1001 // ATOMICALLY_FRAME instance for condemned
1002 // transactions, but I don't fully understand the
1003 // interaction with STM invariants.
1004 stack->sp[1] = (W_)&stg_NO_TREC_closure;
1005 stack->sp[0] = (W_)&stg_ret_p_info;
1006 tso->what_next = ThreadRunGHC;
1007 goto done;
1008 }
1009 else
1010 {
1011 // Freezing an STM transaction. Just aborting the
1012 // transaction would be wrong; this is what we used to
1013 // do, and it goes wrong if the ATOMICALLY_FRAME ever
1014 // gets back onto the stack again, which it will do if
1015 // the transaction is inside unsafePerformIO or
1016 // unsafeInterleaveIO and hence inside an UPDATE_FRAME.
1017 //
1018 // So we want to make it so that if the enclosing
1019 // computation is resumed, we will re-execute the
1020 // transaction. We therefore:
1021 //
1022 // 1. abort the current transaction
1023 // 2. replace the stack up to and including the
1024 // atomically frame with a closure representing
1025 // a call to "atomically x", where x is the code
1026 // of the transaction.
1027 // 3. continue stripping the stack
1028 //
1029 StgTRecHeader *trec = tso->trec;
1030 StgTRecHeader *outer = trec->enclosing_trec;
1031
1032 StgThunk *atomically;
1033 StgAtomicallyFrame *af = (StgAtomicallyFrame*)frame;
1034
1035 debugTraceCap(DEBUG_stm, cap,
1036 "raiseAsync: freezing atomically frame")
1037 stmAbortTransaction(cap, trec);
1038 stmFreeAbortedTRec(cap, trec);
1039 tso->trec = outer;
1040
1041 atomically = (StgThunk*)allocate(cap,sizeofW(StgThunk)+1);
1042 TICK_ALLOC_SE_THK(1,0);
1043 SET_HDR(atomically,&stg_atomically_info,af->header.prof.ccs);
1044 atomically->payload[0] = af->code;
1045
1046 // discard stack up to and including the ATOMICALLY_FRAME
1047 frame += sizeofW(StgAtomicallyFrame);
1048 sp = frame - 1;
1049
1050 // replace the ATOMICALLY_FRAME with call to atomically#
1051 sp[0] = (W_)atomically;
1052 continue;
1053 }
1054
1055 case CATCH_STM_FRAME:
1056 case CATCH_RETRY_FRAME:
1057 // CATCH frames within an atomically block: abort the
1058 // inner transaction and continue. Eventually we will
1059 // hit the outer transaction that will get frozen (see
1060 // above).
1061 //
1062 // In this case (unlike ordinary exceptions) we do not care
1063 // whether the transaction is valid or not because its
1064 // possible validity cannot have caused the exception
1065 // and will not be visible after the abort.
1066 {
1067 StgTRecHeader *trec = tso -> trec;
1068 StgTRecHeader *outer = trec -> enclosing_trec;
1069 debugTraceCap(DEBUG_stm, cap,
1070 "found atomically block delivering async exception");
1071 stmAbortTransaction(cap, trec);
1072 stmFreeAbortedTRec(cap, trec);
1073 tso -> trec = outer;
1074 break;
1075 };
1076
1077 default:
1078 break;
1079 }
1080
1081 // move on to the next stack frame
1082 frame += stack_frame_sizeW((StgClosure *)frame);
1083 }
1084
1085 done:
1086 IF_DEBUG(sanity, checkTSO(tso));
1087
1088 // wake it up
1089 if (tso->why_blocked != NotBlocked) {
1090 tso->why_blocked = NotBlocked;
1091 appendToRunQueue(cap,tso);
1092 }
1093
1094 return tso;
1095 }