1 /* ---------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2006
4 *
5 * Asynchronous exceptions
6 *
7 * --------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11
12 #include "sm/Storage.h"
13 #include "Threads.h"
14 #include "Trace.h"
15 #include "RaiseAsync.h"
16 #include "Schedule.h"
17 #include "Updates.h"
18 #include "STM.h"
19 #include "sm/Sanity.h"
20 #include "Profiling.h"
21 #include "Messages.h"
22 #if defined(mingw32_HOST_OS)
23 #include "win32/IOManager.h"
24 #endif
25
26 static void removeFromQueues(Capability *cap, StgTSO *tso);
27
28 static void removeFromMVarBlockedQueue (StgTSO *tso);
29
30 static void throwToSendMsg (Capability *cap USED_IF_THREADS,
31 Capability *target_cap USED_IF_THREADS,
32 MessageThrowTo *msg USED_IF_THREADS);
33
34 /* -----------------------------------------------------------------------------
35 throwToSingleThreaded
36
37 This version of throwTo is safe to use if and only if one of the
38 following holds:
39
40 - !THREADED_RTS
41
42 - all the other threads in the system are stopped (eg. during GC).
43
44 - we surely own the target TSO (eg. we just took it from the
45 run queue of the current capability, or we are running it).
46
47 It doesn't cater for blocking the source thread until the exception
48 has been raised.
49 -------------------------------------------------------------------------- */
50
51 static void
52 throwToSingleThreaded__ (Capability *cap, StgTSO *tso, StgClosure *exception,
53 rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
54 {
55 // Thread already dead?
56 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
57 return;
58 }
59
60 // Remove it from any blocking queues
61 removeFromQueues(cap,tso);
62
63 raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
64 }
65
66 void
67 throwToSingleThreaded (Capability *cap, StgTSO *tso, StgClosure *exception)
68 {
69 throwToSingleThreaded__(cap, tso, exception, rtsFalse, NULL);
70 }
71
72 void
73 throwToSingleThreaded_ (Capability *cap, StgTSO *tso, StgClosure *exception,
74 rtsBool stop_at_atomically)
75 {
76 throwToSingleThreaded__ (cap, tso, exception, stop_at_atomically, NULL);
77 }
78
79 void // cannot return a different TSO
80 suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
81 {
82 throwToSingleThreaded__ (cap, tso, NULL, rtsFalse, stop_here);
83 }
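
/* A minimal usage sketch (illustrative only; exampleKillThread is a
 * made-up name): a caller that already has exclusive access to the
 * target TSO, e.g. because it holds all Capabilities during shutdown,
 * can kill the thread outright by passing a NULL exception, which
 * strips the whole stack (see raiseAsync() below).  This is roughly
 * what deleteThread() in Schedule.c does.
 *
 *     static void exampleKillThread (Capability *cap, StgTSO *tso)
 *     {
 *         // safe: we own the TSO, per the conditions listed above
 *         throwToSingleThreaded(cap, tso, NULL);
 *     }
 */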
84
85 /* -----------------------------------------------------------------------------
86 throwToSelf
87
88 Useful for throwing an async exception in a thread from the
89 runtime. It handles unlocking the throwto message returned by
90 throwTo().
91
92 Note [Throw to self when masked]
93
94 When a StackOverflow occurs when the thread is masked, we want to
95 defer the exception to when the thread becomes unmasked/hits an
96 interruptible point. We already have a mechanism for doing this,
97 the blocked_exceptions list, but the use here is a bit unusual,
98 because an exception is normally only added to this list upon
99 an asynchronous 'throwTo' call (with all of the relevant
100 multithreaded nonsense). Morally, a stack overflow should be an
101 asynchronous exception sent by a thread to itself, and it should
102 have the same semantics. But there are a few key differences:
103
104 - If you actually tried to send an asynchronous exception to
105 yourself using throwTo, the exception would be delivered
106 immediately. This is because throwTo itself is considered an
107 interruptible point, so the exception is always deliverable. Thus,
108 ordinarily, we never end up with a message to oneself in the
109 blocked_exceptions queue.
110
111 - In the case of a StackOverflow, we don't actually care about the
112 wakeup semantics; when an exception is delivered, the thread that
113 originally threw the exception should be woken up, since throwTo
114 blocks until the exception is successfully thrown. Fortunately,
115 it is harmless to wake up a thread that doesn't actually need waking
116 up, e.g. ourselves.
117
118 - No synchronization is necessary, because we own the TSO and the
119 capability. You can observe this by tracing through the execution
120 of throwTo. We skip synchronizing the message and inter-capability
121 communication.
122
123 We think this doesn't break any invariants, but do be careful!
124 -------------------------------------------------------------------------- */
125
126 void
127 throwToSelf (Capability *cap, StgTSO *tso, StgClosure *exception)
128 {
129 MessageThrowTo *m;
130
131 m = throwTo(cap, tso, tso, exception);
132
133 if (m != NULL) {
134 // throwTo leaves it locked
135 unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info);
136 }
137 }
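
/* A sketch of the intended use (the closure name stackOverflow_closure
 * is an assumption here; see Threads.c:threadStackOverflow for the real
 * call site): when a thread exceeds its stack limit we throw the
 * StackOverflow exception to the thread itself, and the Note above
 * explains why this is safe even when the thread is masked.
 *
 *     // in threadStackOverflow(), roughly:
 *     throwToSelf(cap, tso, (StgClosure *)stackOverflow_closure);
 */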
138
139 /* -----------------------------------------------------------------------------
140 throwTo
141
142 This function may be used to throw an exception from one thread to
143 another, during the course of normal execution. This is a tricky
144 task: the target thread might be running on another CPU, or it
145 may be blocked and could be woken up at any point by another CPU.
146 We have some delicate synchronisation to do.
147
148 The underlying scheme when multiple Capabilities are in use is
149 message passing: when the target of a throwTo is on another
150 Capability, we send a message (a MessageThrowTo closure) to that
151 Capability.
152
153 If the throwTo needs to block because the target TSO is masking
154 exceptions (the TSO_BLOCKEX flag), then the message is placed on
155 the blocked_exceptions queue attached to the target TSO. When the
156 target TSO enters the unmasked state again, it must check the
157 queue. The blocked_exceptions queue is not locked; only the
158 Capability owning the TSO may modify it.
159
160 To make things simpler for throwTo, we always create the message
161 first before deciding what to do. The message may get sent, or it
162 may get attached to a TSO's blocked_exceptions queue, or the
163 exception may get thrown immediately and the message dropped,
164 depending on the current state of the target.
165
166 Currently we send a message if the target belongs to another
167 Capability, and it is
168
169 - NotBlocked, BlockedOnMsgThrowTo,
170 BlockedOnCCall_Interruptible
171
172 - or it is masking exceptions (TSO_BLOCKEX)
173
174 Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
175 BlockedOnBlackHole then we acquire ownership of the TSO by locking
176 its parent container (e.g. the MVar) and then raise the exception.
177 We might change these cases to be more message-passing-like in the
178 future.
179
180 Returns:
181
182 NULL exception was raised, ok to continue
183
184 MessageThrowTo * exception was not raised; the source TSO
185 should now put itself in the state
186 BlockedOnMsgThrowTo, and when it is ready
187 it should unlock the message using
188 unlockClosure(msg, &stg_MSG_THROWTO_info);
189 If it decides not to raise the exception after
190 all, it can revoke it safely with
191 unlockClosure(msg, &stg_MSG_NULL_info);
192
193 -------------------------------------------------------------------------- */
194
195 MessageThrowTo *
196 throwTo (Capability *cap, // the Capability we hold
197 StgTSO *source, // the TSO sending the exception (or NULL)
198 StgTSO *target, // the TSO receiving the exception
199 StgClosure *exception) // the exception closure
200 {
201 MessageThrowTo *msg;
202
203 msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
204 // the message starts locked; see below
205 SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
206 msg->source = source;
207 msg->target = target;
208 msg->exception = exception;
209
210 switch (throwToMsg(cap, msg))
211 {
212 case THROWTO_SUCCESS:
213 // unlock the message now, otherwise we leave a WHITEHOLE in
214 // the heap (#6103)
215 SET_HDR(msg, &stg_MSG_THROWTO_info, CCS_SYSTEM);
216 return NULL;
217
218 case THROWTO_BLOCKED:
219 default:
220 // the caller will unlock the message when it is ready. We
221 // cannot unlock it yet, because the calling thread will need
222 // to tidy up its state first.
223 return msg;
224 }
225 }
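
/* A sketch of how a caller typically consumes the result (this mirrors
 * the "Returns:" contract above; the real callers are in Cmm, e.g.
 * stg_killThreadzh in Exception.cmm, so the C below is illustrative):
 *
 *     MessageThrowTo *msg = throwTo(cap, source, target, exception);
 *     if (msg == NULL) {
 *         // exception already raised in the target; carry on
 *     } else {
 *         // we must block until the target accepts or revokes the message
 *         source->why_blocked        = BlockedOnMsgThrowTo;
 *         source->block_info.throwto = msg;
 *         // ...tidy up, then unlock the message and return to the scheduler:
 *         unlockClosure((StgClosure*)msg, &stg_MSG_THROWTO_info);
 *     }
 */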
226
227
228 nat
229 throwToMsg (Capability *cap, MessageThrowTo *msg)
230 {
231 StgWord status;
232 StgTSO *target = msg->target;
233 Capability *target_cap;
234
235 goto check_target;
236
237 retry:
238 write_barrier();
239 debugTrace(DEBUG_sched, "throwTo: retrying...");
240
241 check_target:
242 ASSERT(target != END_TSO_QUEUE);
243
244 // Thread already dead?
245 if (target->what_next == ThreadComplete
246 || target->what_next == ThreadKilled) {
247 return THROWTO_SUCCESS;
248 }
249
250 debugTraceCap(DEBUG_sched, cap,
251 "throwTo: from thread %lu to thread %lu",
252 (unsigned long)msg->source->id,
253 (unsigned long)msg->target->id);
254
255 #ifdef DEBUG
256 traceThreadStatus(DEBUG_sched, target);
257 #endif
258
259 target_cap = target->cap;
260 if (target->cap != cap) {
261 throwToSendMsg(cap, target_cap, msg);
262 return THROWTO_BLOCKED;
263 }
264
265 status = target->why_blocked;
266
267 switch (status) {
268 case NotBlocked:
269 {
270 if ((target->flags & TSO_BLOCKEX) == 0) {
271 // It's on our run queue and not blocking exceptions
272 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
273 return THROWTO_SUCCESS;
274 } else {
275 blockedThrowTo(cap,target,msg);
276 return THROWTO_BLOCKED;
277 }
278 }
279
280 case BlockedOnMsgThrowTo:
281 {
282 const StgInfoTable *i;
283 MessageThrowTo *m;
284
285 m = target->block_info.throwto;
286
287 // target is local to this cap, but has sent a throwto
288 // message to another cap.
289 //
290 // The source message is locked. We need to revoke the
291 // target's message so that we can raise the exception, so
292 // we attempt to lock it.
293
294 // There's a possibility of a deadlock if two threads are both
295 // trying to throwTo each other (or more generally, a cycle of
296 // threads). To break the symmetry we compare the addresses
297 // of the MessageThrowTo objects, and the one for which m <
298 // msg gets to spin, while the other can only try to lock
299 // once, but must then back off and unlock both before trying
300 // again.
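// For example (an illustrative scenario): if thread A holds its locked
// message msgA at address 0x200 and is trying to revoke B's message
// msgB at 0x100, then for A we have m < msg, so A spins in
// lockClosure(). B, running the same code with m = msgA (0x200) and
// msg = msgB (0x100), only calls tryLockClosure() once, fails, and
// backs off by sending its message instead, so the cycle cannot
// deadlock.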
301 if (m < msg) {
302 i = lockClosure((StgClosure *)m);
303 } else {
304 i = tryLockClosure((StgClosure *)m);
305 if (i == NULL) {
306 // debugBelch("collision\n");
307 throwToSendMsg(cap, target->cap, msg);
308 return THROWTO_BLOCKED;
309 }
310 }
311
312 if (i == &stg_MSG_NULL_info) {
313 // we know there's a MSG_TRY_WAKEUP on the way, so we
314 // might as well just do it now. The message will
315 // be a no-op when it arrives.
316 unlockClosure((StgClosure*)m, i);
317 tryWakeupThread(cap, target);
318 goto retry;
319 }
320
321 if (i != &stg_MSG_THROWTO_info) {
322 // it is no longer a MSG_THROWTO: the target's state has changed, so retry
323 unlockClosure((StgClosure*)m, i);
324 goto retry;
325 }
326
327 if ((target->flags & TSO_BLOCKEX) &&
328 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
329 unlockClosure((StgClosure*)m, i);
330 blockedThrowTo(cap,target,msg);
331 return THROWTO_BLOCKED;
332 }
333
334 // nobody else can wake up this TSO after we claim the message
335 doneWithMsgThrowTo(m);
336
337 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
338 return THROWTO_SUCCESS;
339 }
340
341 case BlockedOnMVar:
342 case BlockedOnMVarRead:
343 {
344 /*
345 To establish ownership of this TSO, we need to acquire a
346 lock on the MVar that it is blocked on.
347 */
348 StgMVar *mvar;
349 StgInfoTable *info USED_IF_THREADS;
350
351 mvar = (StgMVar *)target->block_info.closure;
352
353 // ASSUMPTION: tso->block_info must always point to a
354 // closure. In the threaded RTS it does.
355 switch (get_itbl((StgClosure *)mvar)->type) {
356 case MVAR_CLEAN:
357 case MVAR_DIRTY:
358 break;
359 default:
360 goto retry;
361 }
362
363 info = lockClosure((StgClosure *)mvar);
364
365 // we have the MVar, let's check whether the thread
366 // is still blocked on the same MVar.
367 if ((target->why_blocked != BlockedOnMVar && target->why_blocked != BlockedOnMVarRead)
368 || (StgMVar *)target->block_info.closure != mvar) {
369 unlockClosure((StgClosure *)mvar, info);
370 goto retry;
371 }
372
373 if (target->_link == END_TSO_QUEUE) {
374 // the MVar operation has already completed. There is a
375 // MSG_TRY_WAKEUP on the way, but we can just wake up the
376 // thread now anyway and ignore the message when it
377 // arrives.
378 unlockClosure((StgClosure *)mvar, info);
379 tryWakeupThread(cap, target);
380 goto retry;
381 }
382
383 if ((target->flags & TSO_BLOCKEX) &&
384 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
385 blockedThrowTo(cap,target,msg);
386 unlockClosure((StgClosure *)mvar, info);
387 return THROWTO_BLOCKED;
388 } else {
389 // revoke the MVar operation
390 removeFromMVarBlockedQueue(target);
391 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
392 unlockClosure((StgClosure *)mvar, info);
393 return THROWTO_SUCCESS;
394 }
395 }
396
397 case BlockedOnBlackHole:
398 {
399 if (target->flags & TSO_BLOCKEX) {
400 // BlockedOnBlackHole is not interruptible.
401 blockedThrowTo(cap,target,msg);
402 return THROWTO_BLOCKED;
403 } else {
404 // Revoke the message by replacing it with IND. We're not
405 // locking anything here, so we might still get a TRY_WAKEUP
406 // message from the owner of the blackhole some time in the
407 // future, but that doesn't matter.
408 ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
409 OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
410 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
411 return THROWTO_SUCCESS;
412 }
413 }
414
415 case BlockedOnSTM:
416 lockTSO(target);
417 // Unblocking BlockedOnSTM threads requires the TSO to be
418 // locked; see STM.c:unpark_tso().
419 if (target->why_blocked != BlockedOnSTM) {
420 unlockTSO(target);
421 goto retry;
422 }
423 if ((target->flags & TSO_BLOCKEX) &&
424 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
425 blockedThrowTo(cap,target,msg);
426 unlockTSO(target);
427 return THROWTO_BLOCKED;
428 } else {
429 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
430 unlockTSO(target);
431 return THROWTO_SUCCESS;
432 }
433
434 case BlockedOnCCall_Interruptible:
435 #ifdef THREADED_RTS
436 {
437 Task *task = NULL;
438 // walk suspended_ccalls to find the correct worker thread
439 InCall *incall;
440 for (incall = cap->suspended_ccalls; incall != NULL; incall = incall->next) {
441 if (incall->suspended_tso == target) {
442 task = incall->task;
443 break;
444 }
445 }
446 if (task != NULL) {
447 blockedThrowTo(cap, target, msg);
448 if (!((target->flags & TSO_BLOCKEX) &&
449 ((target->flags & TSO_INTERRUPTIBLE) == 0))) {
450 interruptWorkerTask(task);
451 }
452 return THROWTO_BLOCKED;
453 } else {
454 debugTraceCap(DEBUG_sched, cap, "throwTo: could not find worker thread to kill");
455 }
456 // fall through to the next case
457 }
458 #endif
459 case BlockedOnCCall:
460 blockedThrowTo(cap,target,msg);
461 return THROWTO_BLOCKED;
462
463 #ifndef THREADED_RTS
464 case BlockedOnRead:
465 case BlockedOnWrite:
466 case BlockedOnDelay:
467 #if defined(mingw32_HOST_OS)
468 case BlockedOnDoProc:
469 #endif
470 if ((target->flags & TSO_BLOCKEX) &&
471 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
472 blockedThrowTo(cap,target,msg);
473 return THROWTO_BLOCKED;
474 } else {
475 removeFromQueues(cap,target);
476 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
477 return THROWTO_SUCCESS;
478 }
479 #endif
480
481 case ThreadMigrating:
482 // if it is ThreadMigrating and tso->cap is ours, then it
483 // *must* be migrating *to* this capability. If it were
484 // migrating away from the capability, then tso->cap would
485 // point to the destination.
486 //
487 // There is a MSG_WAKEUP in the message queue for this thread,
488 // but we can just do it preemptively:
489 tryWakeupThread(cap, target);
490 // and now retry, the thread should be runnable.
491 goto retry;
492
493 default:
494 barf("throwTo: unrecognised why_blocked (%d)", target->why_blocked);
495 }
496 barf("throwTo");
497 }
498
499 static void
500 throwToSendMsg (Capability *cap STG_UNUSED,
501 Capability *target_cap USED_IF_THREADS,
502 MessageThrowTo *msg USED_IF_THREADS)
503
504 {
505 #ifdef THREADED_RTS
506 debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);
507
508 sendMessage(cap, target_cap, (Message*)msg);
509 #endif
510 }
511
512 // Block a throwTo message on the target TSO's blocked_exceptions
513 // queue. The current Capability must own the target TSO in order to
514 // modify the blocked_exceptions queue.
515 void
516 blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
517 {
518 debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
519 (unsigned long)target->id);
520
521 ASSERT(target->cap == cap);
522
523 msg->link = target->blocked_exceptions;
524 target->blocked_exceptions = msg;
525 dirty_TSO(cap,target); // we modified the blocked_exceptions queue
526 }
527
528 /* -----------------------------------------------------------------------------
529 Waking up threads blocked in throwTo
530
531 There are two ways to do this: maybePerformBlockedException() will
532 perform the throwTo() for the thread at the head of the queue
533 immediately, and leave the other threads on the queue.
534 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
535 before raising an exception.
536
537 awakenBlockedExceptionQueue() will wake up all the threads in the
538 queue, but not perform any throwTo() immediately. This might be
539 more appropriate when the target thread is the one actually running
540 (see Exception.cmm).
541
542 Returns: non-zero if an exception was raised, zero otherwise.
543 -------------------------------------------------------------------------- */
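
/* A sketch of the typical call sites (the C below is illustrative; the
 * real unmasking code is Cmm, see Exception.cmm):
 *
 *     // at an unmask/interruptible point, deliver at most one pending
 *     // exception to the thread:
 *     if (maybePerformBlockedException(cap, tso)) {
 *         // an exception was raised in tso; return to the scheduler so
 *         // that the handler (or thread shutdown) runs
 *     }
 *
 *     // when tso is the running thread or is being torn down, just
 *     // release the blocked senders without raising anything here:
 *     awakenBlockedExceptionQueue(cap, tso);
 */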
544
545 int
546 maybePerformBlockedException (Capability *cap, StgTSO *tso)
547 {
548 MessageThrowTo *msg;
549 const StgInfoTable *i;
550 StgTSO *source;
551
552 if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
553 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
554 awakenBlockedExceptionQueue(cap,tso);
555 return 1;
556 } else {
557 return 0;
558 }
559 }
560
561 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
562 (tso->flags & TSO_BLOCKEX) != 0) {
563 debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
564 }
565
566 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
567 && ((tso->flags & TSO_BLOCKEX) == 0
568 || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {
569
570 // We unblock just the first thread on the queue, and perform
571 // its throw immediately.
572 loop:
573 msg = tso->blocked_exceptions;
574 if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
575 i = lockClosure((StgClosure*)msg);
576 tso->blocked_exceptions = (MessageThrowTo*)msg->link;
577 if (i == &stg_MSG_NULL_info) {
578 unlockClosure((StgClosure*)msg,i);
579 goto loop;
580 }
581
582 throwToSingleThreaded(cap, msg->target, msg->exception);
583 source = msg->source;
584 doneWithMsgThrowTo(msg);
585 tryWakeupThread(cap, source);
586 return 1;
587 }
588 return 0;
589 }
590
591 // awakenBlockedExceptionQueue(): Just wake up the whole queue of
592 // blocked exceptions.
593
594 void
595 awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
596 {
597 MessageThrowTo *msg;
598 const StgInfoTable *i;
599 StgTSO *source;
600
601 for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
602 msg = (MessageThrowTo*)msg->link) {
603 i = lockClosure((StgClosure *)msg);
604 if (i != &stg_MSG_NULL_info) {
605 source = msg->source;
606 doneWithMsgThrowTo(msg);
607 tryWakeupThread(cap, source);
608 } else {
609 unlockClosure((StgClosure *)msg,i);
610 }
611 }
612 tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
613 }
614
615 /* -----------------------------------------------------------------------------
616 Remove a thread from blocking queues.
617
618 This is for use when we raise an exception in another thread, which
619 may be blocked.
620
621 Precondition: we have exclusive access to the TSO, via the same set
622 of conditions as throwToSingleThreaded() (c.f.).
623 -------------------------------------------------------------------------- */
624
625 static void
626 removeFromMVarBlockedQueue (StgTSO *tso)
627 {
628 StgMVar *mvar = (StgMVar*)tso->block_info.closure;
629 StgMVarTSOQueue *q = (StgMVarTSOQueue*)tso->_link;
630
631 if (q == (StgMVarTSOQueue*)END_TSO_QUEUE) {
632 // already removed from this MVar
633 return;
634 }
635
636 // Assume the MVar is locked. (not assertable; sometimes it isn't
637 // actually WHITEHOLE'd).
638
639 // We want to remove the MVAR_TSO_QUEUE object from the queue. It
640 // isn't doubly-linked so we can't actually remove it; instead we
641 // just overwrite it with an IND if possible and let the GC short
642 // it out. However, we have to be careful to maintain the deque
643 // structure:
644
645 if (mvar->head == q) {
646 mvar->head = q->link;
647 OVERWRITE_INFO(q, &stg_IND_info);
648 if (mvar->tail == q) {
649 mvar->tail = (StgMVarTSOQueue*)END_TSO_QUEUE;
650 }
651 }
652 else if (mvar->tail == q) {
653 // we can't replace it with an IND in this case, because then
654 // we lose the tail pointer when the GC shorts out the IND.
655 // So we use MSG_NULL as a kind of non-dupable indirection;
656 // these are ignored by takeMVar/putMVar.
657 OVERWRITE_INFO(q, &stg_MSG_NULL_info);
658 }
659 else {
660 OVERWRITE_INFO(q, &stg_IND_info);
661 }
662
663 // revoke the MVar operation
664 tso->_link = END_TSO_QUEUE;
665 }
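
/* The three cases above, pictured (q is the MVAR_TSO_QUEUE cell that
 * belongs to tso):
 *
 *   q at the head:    head -> [q] -> x -> ... -> tail
 *                     head is advanced to x and q becomes an IND
 *                     (tail is cleared if q was the only element)
 *
 *   q at the tail:    head -> ... -> x -> [q] = tail
 *                     q becomes a MSG_NULL, which takeMVar/putMVar skip,
 *                     so the tail pointer stays valid
 *
 *   q in the middle:  head -> ... -> [q] -> ... -> tail
 *                     q becomes an IND and the GC shorts it out later
 */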
666
667 static void
668 removeFromQueues(Capability *cap, StgTSO *tso)
669 {
670 switch (tso->why_blocked) {
671
672 case NotBlocked:
673 case ThreadMigrating:
674 return;
675
676 case BlockedOnSTM:
677 // Be careful: nothing to do here! We tell the scheduler that the
678 // thread is runnable and we leave it to the stack-walking code to
679 // abort the transaction while unwinding the stack. We should
680 // perhaps have a debugging test to make sure that this really
681 // happens and that the 'zombie' transaction does not get
682 // committed.
683 goto done;
684
685 case BlockedOnMVar:
686 case BlockedOnMVarRead:
687 removeFromMVarBlockedQueue(tso);
688 goto done;
689
690 case BlockedOnBlackHole:
691 // nothing to do
692 goto done;
693
694 case BlockedOnMsgThrowTo:
695 {
696 MessageThrowTo *m = tso->block_info.throwto;
697 // The message is locked by us, unless we got here via
698 // deleteAllThreads(), in which case we own all the
699 // capabilities.
700 // ASSERT(m->header.info == &stg_WHITEHOLE_info);
701
702 // unlock and revoke it at the same time
703 doneWithMsgThrowTo(m);
704 break;
705 }
706
707 #if !defined(THREADED_RTS)
708 case BlockedOnRead:
709 case BlockedOnWrite:
710 #if defined(mingw32_HOST_OS)
711 case BlockedOnDoProc:
712 #endif
713 removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
714 #if defined(mingw32_HOST_OS)
715 /* (Cooperatively) signal that the worker thread should abort
716 * the request.
717 */
718 abandonWorkRequest(tso->block_info.async_result->reqID);
719 #endif
720 goto done;
721
722 case BlockedOnDelay:
723 removeThreadFromQueue(cap, &sleeping_queue, tso);
724 goto done;
725 #endif
726
727 default:
728 barf("removeFromQueues: %d", tso->why_blocked);
729 }
730
731 done:
732 tso->why_blocked = NotBlocked;
733 appendToRunQueue(cap, tso);
734 }
735
736 /* -----------------------------------------------------------------------------
737 * raiseAsync()
738 *
739 * The following function implements the magic for raising an
740 * asynchronous exception in an existing thread.
741 *
742 * We first remove the thread from any queue on which it might be
743 * blocked. The possible blockages are MVARs, BLOCKING_QUEUESs, and
744 * TSO blocked_exception queues.
745 *
746 * We strip the stack down to the innermost CATCH_FRAME, building
747 * thunks in the heap for all the active computations, so they can
748 * be restarted if necessary. When we reach a CATCH_FRAME, we build
749 * an application of the handler to the exception, and push it on
750 * the top of the stack.
751 *
752 * How exactly do we save all the active computations? We create an
753 * AP_STACK for every UpdateFrame on the stack. Entering one of these
754 * AP_STACKs pushes everything from the corresponding update frame
755 * upwards onto the stack. (Actually, it pushes everything up to the
756 * next update frame plus a pointer to the next AP_STACK object.
757 * Entering the next AP_STACK object pushes more onto the stack until we
758 * reach the last AP_STACK object - at which point the stack should look
759 * exactly as it did when we killed the TSO and we can continue
760 execution by entering the closure on top of the stack.)
761 *
762 * We can also kill a thread entirely - this happens if either (a) the
763 * exception passed to raiseAsync is NULL, or (b) there's no
764 * CATCH_FRAME on the stack. In either case, we strip the entire
765 * stack and replace the thread with a zombie.
766 *
767 * ToDo: in THREADED_RTS mode, this function is only safe if either
768 * (a) we hold all the Capabilities (eg. in GC, or if there is only
769 * one Capability), or (b) we own the Capability that the TSO is
770 * currently blocked on or on the run queue of.
771 *
772 * -------------------------------------------------------------------------- */
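
/* A small worked example (informal, innermost frame first).  Suppose the
 * stack is
 *
 *     <chunk A> UPDATE_FRAME(t) <chunk B> CATCH_FRAME(handler) ...
 *
 * With a non-NULL exception, raiseAsync builds an AP_STACK capturing
 * <chunk A>, updates the thunk t to point to it, walks on (discarding
 * <chunk B> if it contains no further update frames, just as a
 * synchronous exception would), and on reaching the CATCH_FRAME leaves
 *
 *     raise(exception) CATCH_FRAME(handler) ...
 *
 * so that when the thread next runs, the exception is raised and caught
 * by the handler.  If t is ever demanded later, entering its AP_STACK
 * replays <chunk A> as though the thread had never been interrupted.
 */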
773
774 StgTSO *
775 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
776 rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
777 {
778 StgRetInfoTable *info;
779 StgPtr sp, frame;
780 StgClosure *updatee;
781 nat i;
782 StgStack *stack;
783
784 debugTraceCap(DEBUG_sched, cap,
785 "raising exception in thread %ld.", (long)tso->id);
786
787 #if defined(PROFILING)
788 /*
789 * Debugging tool: on raising an exception, show where we are.
790 * See also Exception.cmm:stg_raisezh.
791 * This wasn't done for asynchronous exceptions originally; see #1450
792 */
793 if (RtsFlags.ProfFlags.showCCSOnException && exception != NULL)
794 {
795 fprintCCS_stderr(tso->prof.cccs,exception,tso);
796 }
797 #endif
798 // ASSUMES: the thread is not already complete or dead
799 // Upper layers should deal with that.
800 ASSERT(tso->what_next != ThreadComplete &&
801 tso->what_next != ThreadKilled);
802
803 // only if we own this TSO (except that deleteThread() calls this when we own all the Capabilities; see deleteAllThreads())
804 ASSERT(tso->cap == cap);
805
806 stack = tso->stackobj;
807
808 // mark it dirty; we're about to change its stack.
809 dirty_TSO(cap, tso);
810 dirty_STACK(cap, stack);
811
812 sp = stack->sp;
813
814 if (stop_here != NULL) {
815 updatee = stop_here->updatee;
816 } else {
817 updatee = NULL;
818 }
819
820 // The stack freezing code assumes there's a closure pointer on
821 // the top of the stack, so we have to arrange that this is the case...
822 //
823 if (sp[0] == (W_)&stg_enter_info) {
824 sp++;
825 } else {
826 sp--;
827 sp[0] = (W_)&stg_dummy_ret_closure;
828 }
829
830 frame = sp + 1;
831 while (stop_here == NULL || frame < (StgPtr)stop_here) {
832
833 // 1. Let the top of the stack be the "current closure"
834 //
835 // 2. Walk up the stack until we find either an UPDATE_FRAME or a
836 // CATCH_FRAME.
837 //
838 // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
839 // current closure applied to the chunk of stack up to (but not
840 // including) the update frame. This closure becomes the "current
841 // closure". Go back to step 2.
842 //
843 // 4. If it's a CATCH_FRAME, then leave the exception handler on
844 // top of the stack applied to the exception.
845 //
846 // 5. If it's a STOP_FRAME, then kill the thread.
847 //
848 // 6. If it's an UNDERFLOW_FRAME, then continue with the next
849 // stack chunk.
850 //
851 // NB: if we pass an ATOMICALLY_FRAME then abort the associated
852 // transaction
853
854 info = get_ret_itbl((StgClosure *)frame);
855
856 switch (info->i.type) {
857
858 case UPDATE_FRAME:
859 {
860 StgAP_STACK * ap;
861 nat words;
862
863 // First build an AP_STACK consisting of the stack chunk above the
864 // current update frame, with the top word on the stack as the
865 // fun field.
866 //
867 words = frame - sp - 1;
868 ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
869
870 ap->size = words;
871 ap->fun = (StgClosure *)sp[0];
872 sp++;
873 for(i=0; i < (nat)words; ++i) {
874 ap->payload[i] = (StgClosure *)*sp++;
875 }
876
877 SET_HDR(ap,&stg_AP_STACK_info,
878 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
879 TICK_ALLOC_UP_THK(WDS(words+1),0);
880
881 //IF_DEBUG(scheduler,
882 // debugBelch("sched: Updating ");
883 // printPtr((P_)((StgUpdateFrame *)frame)->updatee);
884 // debugBelch(" with ");
885 // printObj((StgClosure *)ap);
886 // );
887
888 if (((StgUpdateFrame *)frame)->updatee == updatee) {
889 // If this update frame points to the same closure as
890 // the update frame further down the stack
891 // (stop_here), then don't perform the update. We
892 // want to keep the blackhole in this case, so we can
893 // detect and report the loop (#2783).
894 ap = (StgAP_STACK*)updatee;
895 } else {
896 // Perform the update
897 // TODO: this may waste some work, if the thunk has
898 // already been updated by another thread.
899 updateThunk(cap, tso,
900 ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
901 }
902
903 sp += sizeofW(StgUpdateFrame) - 1;
904 sp[0] = (W_)ap; // push onto stack
905 frame = sp + 1;
906 continue; //no need to bump frame
907 }
908
909 case UNDERFLOW_FRAME:
910 {
911 StgAP_STACK * ap;
912 nat words;
913
914 // First build an AP_STACK consisting of the stack chunk above the
915 // current update frame, with the top word on the stack as the
916 // fun field.
917 //
918 words = frame - sp - 1;
919 ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
920
921 ap->size = words;
922 ap->fun = (StgClosure *)sp[0];
923 sp++;
924 for(i=0; i < (nat)words; ++i) {
925 ap->payload[i] = (StgClosure *)*sp++;
926 }
927
928 SET_HDR(ap,&stg_AP_STACK_NOUPD_info,
929 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
930 TICK_ALLOC_SE_THK(WDS(words+1),0);
931
932 stack->sp = sp;
933 threadStackUnderflow(cap,tso);
934 stack = tso->stackobj;
935 sp = stack->sp;
936
937 sp--;
938 sp[0] = (W_)ap;
939 frame = sp + 1;
940 continue;
941 }
942
943 case STOP_FRAME:
944 {
945 // We've stripped the entire stack, the thread is now dead.
946 tso->what_next = ThreadKilled;
947 stack->sp = frame + sizeofW(StgStopFrame);
948 goto done;
949 }
950
951 case CATCH_FRAME:
952 // If we find a CATCH_FRAME, and we've got an exception to raise,
953 // then build the THUNK raise(exception), and leave it on
954 // top of the CATCH_FRAME ready to enter.
955 //
956 {
957 StgCatchFrame *cf = (StgCatchFrame *)frame;
958 StgThunk *raise;
959
960 if (exception == NULL) break;
961
962 // we've got an exception to raise, so let's pass it to the
963 // handler in this frame.
964 //
965 raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
966 TICK_ALLOC_SE_THK(WDS(1),0);
967 SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
968 raise->payload[0] = exception;
969
970 // throw away the stack from Sp up to the CATCH_FRAME.
971 //
972 sp = frame - 1;
973
974 /* Ensure that async exceptions are blocked now, so we don't get
975 * a surprise exception before we get around to executing the
976 * handler.
977 */
978 tso->flags |= TSO_BLOCKEX;
979 if ((cf->exceptions_blocked & TSO_INTERRUPTIBLE) == 0) {
980 tso->flags &= ~TSO_INTERRUPTIBLE;
981 } else {
982 tso->flags |= TSO_INTERRUPTIBLE;
983 }
984
985 /* Put the newly-built THUNK on top of the stack, ready to execute
986 * when the thread restarts.
987 */
988 sp[0] = (W_)raise;
989 sp[-1] = (W_)&stg_enter_info;
990 stack->sp = sp-1;
991 tso->what_next = ThreadRunGHC;
992 goto done;
993 }
994
995 case ATOMICALLY_FRAME:
996 if (stop_at_atomically) {
997 ASSERT(tso->trec->enclosing_trec == NO_TREC);
998 stmCondemnTransaction(cap, tso -> trec);
999 stack->sp = frame - 2;
1000 // The ATOMICALLY_FRAME expects to be returned a
1001 // result from the transaction, which it stores in the
1002 // stack frame. Hence we arrange to return a dummy
1003 // result, so that the GC doesn't get upset (#3578).
1004 // Perhaps a better way would be to have a different
1005 // ATOMICALLY_FRAME instance for condemned
1006 // transactions, but I don't fully understand the
1007 // interaction with STM invariants.
1008 stack->sp[1] = (W_)&stg_NO_TREC_closure;
1009 stack->sp[0] = (W_)&stg_ret_p_info;
1010 tso->what_next = ThreadRunGHC;
1011 goto done;
1012 }
1013 else
1014 {
1015 // Freezing an STM transaction. Just aborting the
1016 // transaction would be wrong; this is what we used to
1017 // do, and it goes wrong if the ATOMICALLY_FRAME ever
1018 // gets back onto the stack again, which it will do if
1019 // the transaction is inside unsafePerformIO or
1020 // unsafeInterleaveIO and hence inside an UPDATE_FRAME.
1021 //
1022 // So we want to make it so that if the enclosing
1023 // computation is resumed, we will re-execute the
1024 // transaction. We therefore:
1025 //
1026 // 1. abort the current transaction
1027 // 2. replace the stack up to and including the
1028 // atomically frame with a closure representing
1029 // a call to "atomically x", where x is the code
1030 // of the transaction.
1031 // 3. continue stripping the stack
1032 //
1033 StgTRecHeader *trec = tso->trec;
1034 StgTRecHeader *outer = trec->enclosing_trec;
1035
1036 StgThunk *atomically;
1037 StgAtomicallyFrame *af = (StgAtomicallyFrame*)frame;
1038
1039 debugTraceCap(DEBUG_stm, cap,
1040 "raiseAsync: freezing atomically frame");
1041 stmAbortTransaction(cap, trec);
1042 stmFreeAbortedTRec(cap, trec);
1043 tso->trec = outer;
1044
1045 atomically = (StgThunk*)allocate(cap,sizeofW(StgThunk)+1);
1046 TICK_ALLOC_SE_THK(1,0);
1047 SET_HDR(atomically,&stg_atomically_info,af->header.prof.ccs);
1048 atomically->payload[0] = af->code;
1049
1050 // discard stack up to and including the ATOMICALLY_FRAME
1051 frame += sizeofW(StgAtomicallyFrame);
1052 sp = frame - 1;
1053
1054 // replace the ATOMICALLY_FRAME with call to atomically#
1055 sp[0] = (W_)atomically;
1056 continue;
1057 }
1058
1059 case CATCH_STM_FRAME:
1060 case CATCH_RETRY_FRAME:
1061 // CATCH frames within an atomically block: abort the
1062 // inner transaction and continue. Eventually we will
1063 // hit the outer transaction that will get frozen (see
1064 // above).
1065 //
1066 // In this case (unlike ordinary exceptions) we do not care
1067 // whether the transaction is valid or not because its
1068 // possible validity cannot have caused the exception
1069 // and will not be visible after the abort.
1070 {
1071 StgTRecHeader *trec = tso -> trec;
1072 StgTRecHeader *outer = trec -> enclosing_trec;
1073 debugTraceCap(DEBUG_stm, cap,
1074 "found atomically block delivering async exception");
1075 stmAbortTransaction(cap, trec);
1076 stmFreeAbortedTRec(cap, trec);
1077 tso -> trec = outer;
1078 break;
1079 };
1080
1081 default:
1082 break;
1083 }
1084
1085 // move on to the next stack frame
1086 frame += stack_frame_sizeW((StgClosure *)frame);
1087 }
1088
1089 done:
1090 IF_DEBUG(sanity, checkTSO(tso));
1091
1092 // wake it up
1093 if (tso->why_blocked != NotBlocked) {
1094 tso->why_blocked = NotBlocked;
1095 appendToRunQueue(cap,tso);
1096 }
1097
1098 return tso;
1099 }