/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Asynchronous exceptions
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "sm/Storage.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
#include "Schedule.h"
#include "Updates.h"
#include "STM.h"
#include "sm/Sanity.h"
#include "Profiling.h"
#include "Messages.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif

static StgTSO* raiseAsync (Capability *cap,
                           StgTSO *tso,
                           StgClosure *exception,
                           rtsBool stop_at_atomically,
                           StgUpdateFrame *stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void removeFromMVarBlockedQueue (StgTSO *tso);

static void throwToSendMsg (Capability *cap USED_IF_THREADS,
                            Capability *target_cap USED_IF_THREADS,
                            MessageThrowTo *msg USED_IF_THREADS);

/* -----------------------------------------------------------------------------
   throwToSingleThreaded

   This version of throwTo is safe to use if and only if one of the
   following holds:

   - !THREADED_RTS

   - all the other threads in the system are stopped (eg. during GC).

   - we surely own the target TSO (eg. we just took it from the
     run queue of the current capability, or we are running it).

   It doesn't cater for blocking the source thread until the exception
   has been raised.
   -------------------------------------------------------------------------- */

static void
throwToSingleThreaded__ (Capability *cap, StgTSO *tso, StgClosure *exception,
                         rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
}

void
throwToSingleThreaded (Capability *cap, StgTSO *tso, StgClosure *exception)
{
    throwToSingleThreaded__(cap, tso, exception, rtsFalse, NULL);
}

void
throwToSingleThreaded_ (Capability *cap, StgTSO *tso, StgClosure *exception,
                        rtsBool stop_at_atomically)
{
    throwToSingleThreaded__ (cap, tso, exception, stop_at_atomically, NULL);
}

void // cannot return a different TSO
suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
    throwToSingleThreaded__ (cap, tso, NULL, rtsFalse, stop_here);
}
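
/* Illustrative sketch (not part of this file): a typical caller that
 * satisfies the ownership conditions above is the scheduler's
 * deleteThread() in Schedule.c, which roughly does:
 *
 *     static void deleteThread (Capability *cap, StgTSO *tso)
 *     {
 *         // we own the TSO's run queue, or hold all Capabilities
 *         if (tso->why_blocked != BlockedOnCCall &&
 *             tso->why_blocked != BlockedOnCCall_Interruptible) {
 *             throwToSingleThreaded(cap, tso, NULL); // NULL kills the thread
 *         }
 *     }
 */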

/* -----------------------------------------------------------------------------
   throwToSelf

   Useful for throwing an async exception in a thread from the
   runtime.  It handles unlocking the throwto message returned by
   throwTo().

   Note [Throw to self when masked]

   When a StackOverflow occurs when the thread is masked, we want to
   defer the exception to when the thread becomes unmasked/hits an
   interruptible point.  We already have a mechanism for doing this,
   the blocked_exceptions list, but the use here is a bit unusual,
   because an exception is normally only added to this list upon
   an asynchronous 'throwTo' call (with all of the relevant
   multithreaded nonsense).  Morally, a stack overflow should be an
   asynchronous exception sent by a thread to itself, and it should
   have the same semantics.  But there are a few key differences:

   - If you actually tried to send an asynchronous exception to
     yourself using throwTo, the exception would actually immediately
     be delivered.  This is because throwTo itself is considered an
     interruptible point, so the exception is always deliverable.  Thus,
     ordinarily, we never end up with a message to ourselves in the
     blocked_exceptions queue.

   - In the case of a StackOverflow, we don't actually care about the
     wakeup semantics; when an exception is delivered, the thread that
     originally threw the exception should be woken up, since throwTo
     blocks until the exception is successfully thrown.  Fortunately,
     it is harmless to wake up a thread that doesn't actually need
     waking up, e.g. ourselves.

   - No synchronization is necessary, because we own the TSO and the
     capability.  You can observe this by tracing through the execution
     of throwTo.  We skip synchronizing the message and inter-capability
     communication.

   We think this doesn't break any invariants, but do be careful!
   -------------------------------------------------------------------------- */

void
throwToSelf (Capability *cap, StgTSO *tso, StgClosure *exception)
{
    MessageThrowTo *m;

    m = throwTo(cap, tso, tso, exception);

    if (m != NULL) {
        // throwTo leaves it locked
        unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info);
    }
}
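
/* Illustrative sketch: the stack-overflow path (see threadStackOverflow()
 * in Threads.c and the Note above) uses this function roughly as
 *
 *     throwToSelf(cap, tso, (StgClosure *)stackOverflow_closure);
 *
 * If the thread is masked, the message simply parks on its own
 * blocked_exceptions queue until the thread unmasks.
 */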

/* -----------------------------------------------------------------------------
   throwTo

   This function may be used to throw an exception from one thread to
   another, during the course of normal execution.  This is a tricky
   task: the target thread might be running on another CPU, or it
   may be blocked and could be woken up at any point by another CPU.
   We have some delicate synchronisation to do.

   The underlying scheme when multiple Capabilities are in use is
   message passing: when the target of a throwTo is on another
   Capability, we send a message (a MessageThrowTo closure) to that
   Capability.

   If the throwTo needs to block because the target TSO is masking
   exceptions (the TSO_BLOCKEX flag), then the message is placed on
   the blocked_exceptions queue attached to the target TSO.  When the
   target TSO enters the unmasked state again, it must check the
   queue.  The blocked_exceptions queue is not locked; only the
   Capability owning the TSO may modify it.

   To make things simpler for throwTo, we always create the message
   first before deciding what to do.  The message may get sent, or it
   may get attached to a TSO's blocked_exceptions queue, or the
   exception may get thrown immediately and the message dropped,
   depending on the current state of the target.

   Currently we send a message if the target belongs to another
   Capability, and it is

   - NotBlocked, BlockedOnMsgThrowTo,
     BlockedOnCCall_Interruptible

   - or it is masking exceptions (TSO_BLOCKEX)

   Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
   BlockedOnBlackHole then we acquire ownership of the TSO by locking
   its parent container (e.g. the MVar) and then raise the exception.
   We might change these cases to be more message-passing-like in the
   future.

   Returns:

   NULL               exception was raised, ok to continue

   MessageThrowTo *   exception was not raised; the source TSO
                      should now put itself in the state
                      BlockedOnMsgThrowTo, and when it is ready
                      it should unlock the message using
                      unlockClosure(msg, &stg_MSG_THROWTO_info);
                      If it decides not to raise the exception after
                      all, it can revoke it safely with
                      unlockClosure(msg, &stg_MSG_NULL_info);

   -------------------------------------------------------------------------- */

MessageThrowTo *
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception (or NULL)
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception) // the exception closure
{
    MessageThrowTo *msg;

    msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
    // the message starts locked; see below
    SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
    msg->source = source;
    msg->target = target;
    msg->exception = exception;

    switch (throwToMsg(cap, msg))
    {
    case THROWTO_SUCCESS:
        // unlock the message now, otherwise we leave a WHITEHOLE in
        // the heap (#6103)
        SET_HDR(msg, &stg_MSG_THROWTO_info, CCS_SYSTEM);
        return NULL;

    case THROWTO_BLOCKED:
    default:
        // the caller will unlock the message when it is ready.  We
        // cannot unlock it yet, because the calling thread will need
        // to tidy up its state first.
        return msg;
    }
}
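
/* Illustrative sketch of the calling protocol described above (the real
 * callers live in the scheduler and in Exception.cmm):
 *
 *     MessageThrowTo *m = throwTo(cap, source, target, exception);
 *     if (m != NULL) {
 *         // not raised yet: the source blocks until the target accepts it
 *         source->why_blocked = BlockedOnMsgThrowTo;
 *         source->block_info.throwto = m;
 *         // ... after tidying up its state, it releases the message:
 *         unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info);
 *     }
 */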


nat
throwToMsg (Capability *cap, MessageThrowTo *msg)
{
    StgWord status;
    StgTSO *target = msg->target;
    Capability *target_cap;

    goto check_target;

retry:
    write_barrier();
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    ASSERT(target != END_TSO_QUEUE);

    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "throwTo: from thread %lu to thread %lu",
                  (unsigned long)msg->source->id,
                  (unsigned long)msg->target->id);

#ifdef DEBUG
    traceThreadStatus(DEBUG_sched, target);
#endif

    target_cap = target->cap;
    if (target->cap != cap) {
        throwToSendMsg(cap, target_cap, msg);
        return THROWTO_BLOCKED;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
    {
        if ((target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;
        MessageThrowTo *m;

        m = target->block_info.throwto;

        // target is local to this cap, but has sent a throwto
        // message to another cap.
        //
        // The source message is locked.  We need to revoke the
        // target's message so that we can raise the exception, so
        // we attempt to lock it.

        // There's a possibility of a deadlock if two threads are both
        // trying to throwTo each other (or more generally, a cycle of
        // threads).  To break the symmetry we compare the addresses
        // of the MessageThrowTo objects, and the one for which m <
        // msg gets to spin, while the other can only try to lock
        // once, but must then back off and unlock both before trying
        // again.
        if (m < msg) {
            i = lockClosure((StgClosure *)m);
        } else {
            i = tryLockClosure((StgClosure *)m);
            if (i == NULL) {
                // debugBelch("collision\n");
                throwToSendMsg(cap, target->cap, msg);
                return THROWTO_BLOCKED;
            }
        }

        if (i == &stg_MSG_NULL_info) {
            // we know there's a MSG_TRY_WAKEUP on the way, so we
            // might as well just do it now.  The message will
            // be a no-op when it arrives.
            unlockClosure((StgClosure*)m, i);
            tryWakeupThread(cap, target);
            goto retry;
        }

        if (i != &stg_MSG_THROWTO_info) {
            // the message is no longer a MSG_THROWTO: it must have
            // changed while we were acquiring the lock, so retry
            unlockClosure((StgClosure*)m, i);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            unlockClosure((StgClosure*)m, i);
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }

        // nobody else can wake up this TSO after we claim the message
        doneWithMsgThrowTo(m);

        raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
        return THROWTO_SUCCESS;
    }

    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure.  In the threaded RTS it does.
        switch (get_itbl((StgClosure *)mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if ((target->why_blocked != BlockedOnMVar && target->why_blocked != BlockedOnMVarRead)
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if (target->_link == END_TSO_QUEUE) {
            // the MVar operation has already completed.  There is a
            // MSG_TRY_WAKEUP on the way, but we can just wake up the
            // thread now anyway and ignore the message when it
            // arrives.
            unlockClosure((StgClosure *)mvar, info);
            tryWakeupThread(cap, target);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_BLOCKED;
        } else {
            // revoke the MVar operation
            removeFromMVarBlockedQueue(target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        if (target->flags & TSO_BLOCKEX) {
            // BlockedOnBlackHole is not interruptible.
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            // Revoke the message by replacing it with IND.  We're not
            // locking anything here, so we might still get a TRY_WAKEUP
            // message from the owner of the blackhole some time in the
            // future, but that doesn't matter.
            ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
            OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockTSO(target);
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall_Interruptible:
#ifdef THREADED_RTS
    {
        Task *task = NULL;
        // walk suspended_ccalls to find the correct worker thread
        InCall *incall;
        for (incall = cap->suspended_ccalls; incall != NULL; incall = incall->next) {
            if (incall->suspended_tso == target) {
                task = incall->task;
                break;
            }
        }
        if (task != NULL) {
            blockedThrowTo(cap, target, msg);
            if (!((target->flags & TSO_BLOCKEX) &&
                  ((target->flags & TSO_INTERRUPTIBLE) == 0))) {
                interruptWorkerTask(task);
            }
            return THROWTO_BLOCKED;
        } else {
            debugTraceCap(DEBUG_sched, cap, "throwTo: could not find worker thread to kill");
        }
        // fall through to the BlockedOnCCall case
    }
#endif
    case BlockedOnCCall:
        blockedThrowTo(cap,target,msg);
        return THROWTO_BLOCKED;

#ifndef THREADED_RTS
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    case ThreadMigrating:
        // if it is ThreadMigrating and tso->cap is ours, then it
        // *must* be migrating *to* this capability.  If it were
        // migrating away from the capability, then tso->cap would
        // point to the destination.
        //
        // There is a MSG_WAKEUP in the message queue for this thread,
        // but we can just do it preemptively:
        tryWakeupThread(cap, target);
        // and now retry, the thread should be runnable.
        goto retry;

    default:
        barf("throwTo: unrecognised why_blocked (%d)", target->why_blocked);
    }
    barf("throwTo");
}

static void
throwToSendMsg (Capability *cap STG_UNUSED,
                Capability *target_cap USED_IF_THREADS,
                MessageThrowTo *msg USED_IF_THREADS)
{
#ifdef THREADED_RTS
    debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);

    sendMessage(cap, target_cap, (Message*)msg);
#endif
}
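
/* Illustrative sketch: a message sent this way is eventually picked up
 * by the target Capability's message loop (executeMessage() in
 * Messages.c), which re-runs it roughly as
 *
 *     r = throwToMsg(cap, (MessageThrowTo *)msg);
 *     if (r == THROWTO_SUCCESS) {
 *         StgTSO *source = msg->source;
 *         doneWithMsgThrowTo(msg);
 *         tryWakeupThread(cap, source);
 *     }
 *
 * so throwToMsg() always executes on whichever Capability currently
 * owns the target.
 */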

// Block a throwTo message on the target TSO's blocked_exceptions
// queue.  The current Capability must own the target TSO in order to
// modify the blocked_exceptions queue.
void
blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
{
    debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
                  (unsigned long)target->id);

    ASSERT(target->cap == cap);

    msg->link = target->blocked_exceptions;
    target->blocked_exceptions = msg;
    dirty_TSO(cap,target); // we modified the blocked_exceptions queue
}
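
/* Illustrative note: messages are pushed onto the front of the queue, so
 * after blocking m1 and then m2 the queue reads m2 -> m1 -> END, and
 * maybePerformBlockedException() below performs the most recent throwTo
 * first.
 */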

/* -----------------------------------------------------------------------------
   Waking up threads blocked in throwTo

   There are two ways to do this: maybePerformBlockedException() will
   perform the throwTo() for the thread at the head of the queue
   immediately, and leave the other threads on the queue.
   maybePerformBlockedException() also checks the TSO_BLOCKEX flag
   before raising an exception.

   awakenBlockedExceptionQueue() will wake up all the threads in the
   queue, but not perform any throwTo() immediately.  This might be
   more appropriate when the target thread is the one actually running
   (see Exception.cmm).

   Returns: non-zero if an exception was raised, zero otherwise.
   -------------------------------------------------------------------------- */
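
/* Illustrative sketch: the unmask path in Exception.cmm effectively does
 *
 *     if (maybePerformBlockedException(cap, tso)) {
 *         // an exception was raised on tso: when it next runs, its
 *         // stack enters either the raise closure or a handler
 *     }
 *
 * whereas awakenBlockedExceptionQueue() merely wakes the senders, which
 * then retry their throwTo against the now-running thread.
 */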

int
maybePerformBlockedException (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;
    StgTSO *source;

    if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
        if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);
            return 1;
        } else {
            return 0;
        }
    }

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
    }

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
    loop:
        msg = tso->blocked_exceptions;
        if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
        i = lockClosure((StgClosure*)msg);
        tso->blocked_exceptions = (MessageThrowTo*)msg->link;
        if (i == &stg_MSG_NULL_info) {
            unlockClosure((StgClosure*)msg,i);
            goto loop;
        }

        throwToSingleThreaded(cap, msg->target, msg->exception);
        source = msg->source;
        doneWithMsgThrowTo(msg);
        tryWakeupThread(cap, source);
        return 1;
    }
    return 0;
}

// awakenBlockedExceptionQueue(): Just wake up the whole queue of
// blocked exceptions.

void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    MessageThrowTo *msg;
    const StgInfoTable *i;
    StgTSO *source;

    for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
         msg = (MessageThrowTo*)msg->link) {
        i = lockClosure((StgClosure *)msg);
        if (i != &stg_MSG_NULL_info) {
            source = msg->source;
            doneWithMsgThrowTo(msg);
            tryWakeupThread(cap, source);
        } else {
            unlockClosure((StgClosure *)msg,i);
        }
    }
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
}

/* -----------------------------------------------------------------------------
   Remove a thread from blocking queues.

   This is for use when we raise an exception in another thread, which
   may be blocked.

   Precondition: we have exclusive access to the TSO, via the same set
   of conditions as throwToSingleThreaded() (c.f.).
   -------------------------------------------------------------------------- */

static void
removeFromMVarBlockedQueue (StgTSO *tso)
{
    StgMVar *mvar = (StgMVar*)tso->block_info.closure;
    StgMVarTSOQueue *q = (StgMVarTSOQueue*)tso->_link;

    if (q == (StgMVarTSOQueue*)END_TSO_QUEUE) {
        // already removed from this MVar
        return;
    }

    // Assume the MVar is locked. (not assertable; sometimes it isn't
    // actually WHITEHOLE'd).

    // We want to remove the MVAR_TSO_QUEUE object from the queue.  It
    // isn't doubly-linked so we can't actually remove it; instead we
    // just overwrite it with an IND if possible and let the GC short
    // it out.  However, we have to be careful to maintain the deque
    // structure:

    if (mvar->head == q) {
        mvar->head = q->link;
        OVERWRITE_INFO(q, &stg_IND_info);
        if (mvar->tail == q) {
            mvar->tail = (StgMVarTSOQueue*)END_TSO_QUEUE;
        }
    }
    else if (mvar->tail == q) {
        // we can't replace it with an IND in this case, because then
        // we lose the tail pointer when the GC shorts out the IND.
        // So we use MSG_NULL as a kind of non-dupable indirection;
        // these are ignored by takeMVar/putMVar.
        OVERWRITE_INFO(q, &stg_MSG_NULL_info);
    }
    else {
        OVERWRITE_INFO(q, &stg_IND_info);
    }

    // revoke the MVar operation
    tso->_link = END_TSO_QUEUE;
}
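
/* Illustrative example of the three cases above: with the queue
 *
 *     mvar->head -> q1 -> q2 -> q3 <- mvar->tail
 *
 * removing q1 advances mvar->head; removing q2 overwrites it with an
 * IND that the GC will short out of the list; removing q3 overwrites
 * it with MSG_NULL instead, because shorting out an IND at the tail
 * would leave mvar->tail dangling.
 */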

static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
    case ThreadMigrating:
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here!  We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack.  We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
    case BlockedOnMVarRead:
        removeFromMVarBlockedQueue(tso);
        goto done;

    case BlockedOnBlackHole:
        // nothing to do
        goto done;

    case BlockedOnMsgThrowTo:
    {
        MessageThrowTo *m = tso->block_info.throwto;
        // The message is locked by us, unless we got here via
        // deleteAllThreads(), in which case we own all the
        // capabilities.
        // ASSERT(m->header.info == &stg_WHITEHOLE_info);

        // unlock and revoke it at the same time
        doneWithMsgThrowTo(m);
        break;
    }

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(cap, &sleeping_queue, tso);
        goto done;
#endif

    default:
        barf("removeFromQueues: %d", tso->why_blocked);
    }

done:
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap, tso);
}

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked.  The possible blockages are MVARs, BLOCKING_QUEUEs, and
 * TSO blocked_exception queues.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary.  When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations?  We create an
 * AP_STACK for every UpdateFrame on the stack.  Entering one of these
 * AP_STACKs pushes everything from the corresponding update frame
 * upwards onto the stack.  (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_STACK object.)
 * Entering the next AP_STACK object pushes more onto the stack until we
 * reach the last AP_STACK object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack.  In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * ToDo: in THREADED_RTS mode, this function is only safe if either
 * (a) we hold all the Capabilities (eg. in GC, or if there is only
 * one Capability), or (b) we own the Capability that the TSO is
 * currently blocked on or on the run queue of.
 *
 * -------------------------------------------------------------------------- */
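
/* Illustrative example: suppose the target thread's stack is
 *
 *     sp -> e                  -- closure on top of the stack
 *           ...frames...
 *           UPDATE_FRAME u1
 *           ...frames...
 *           UPDATE_FRAME u2
 *           CATCH_FRAME h
 *
 * We build ap1 = AP_STACK { fun = e, payload = <frames above u1> },
 * update u1's updatee with ap1, and continue the walk with ap1 as the
 * new top-of-stack closure, building ap2 for the chunk above u2.  At
 * the CATCH_FRAME we leave a thunk (raise exception) on top for the
 * handler h.  Entering ap2 later re-pushes its chunk and ap1, so the
 * frozen computation resumes exactly where it stopped.
 */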

static StgTSO *
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    StgClosure *updatee;
    nat i;
    StgStack *stack;

    debugTraceCap(DEBUG_sched, cap,
                  "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:stg_raisezh.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException && exception != NULL)
    {
        fprintCCS_stderr(tso->prof.cccs,exception,tso);
    }
#endif
    // ASSUMES: the thread is not already complete or dead
    // Upper layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete &&
           tso->what_next != ThreadKilled);

    // only if we own this TSO (except that deleteThread() calls this)
    ASSERT(tso->cap == cap);

    stack = tso->stackobj;

    // mark it dirty; we're about to change its stack.
    dirty_TSO(cap, tso);
    dirty_STACK(cap, stack);

    sp = stack->sp;

    if (stop_here != NULL) {
        updatee = stop_here->updatee;
    } else {
        updatee = NULL;
    }

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        sp++;
    } else {
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //    CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame.  This closure becomes the "current
        //    closure".  Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // 6. If it's an UNDERFLOW_FRAME, then continue with the next
        //    stack chunk.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(WDS(words+1),0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //         );

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update.  We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            } else {
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                updateThunk(cap, tso,
                            ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
            }

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; // no need to bump frame
        }

        case UNDERFLOW_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_NOUPD_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_SE_THK(WDS(words+1),0);

            stack->sp = sp;
            threadStackUnderflow(cap,tso);
            stack = tso->stackobj;
            sp = stack->sp;

            sp--;
            sp[0] = (W_)ap;
            frame = sp + 1;
            continue;
        }

        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            stack->sp = frame + sizeofW(StgStopFrame);
            goto done;
        }

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
            StgCatchFrame *cf = (StgCatchFrame *)frame;
            StgThunk *raise;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(WDS(1),0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX;
            if ((cf->exceptions_blocked & TSO_INTERRUPTIBLE) == 0) {
                tso->flags &= ~TSO_INTERRUPTIBLE;
            } else {
                tso->flags |= TSO_INTERRUPTIBLE;
            }

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            stack->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            goto done;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso->trec);
                stack->sp = frame - 2;
                // The ATOMICALLY_FRAME expects to be returned a
                // result from the transaction, which it stores in the
                // stack frame.  Hence we arrange to return a dummy
                // result, so that the GC doesn't get upset (#3578).
                // Perhaps a better way would be to have a different
                // ATOMICALLY_FRAME instance for condemned
                // transactions, but I don't fully understand the
                // interaction with STM invariants.
                stack->sp[1] = (W_)&stg_NO_TREC_closure;
                stack->sp[0] = (W_)&stg_ret_p_info;
                tso->what_next = ThreadRunGHC;
                goto done;
            }
            else
            {
                // Freezing an STM transaction.  Just aborting the
                // transaction would be wrong; this is what we used to
                // do, and it goes wrong if the ATOMICALLY_FRAME ever
                // gets back onto the stack again, which it will do if
                // the transaction is inside unsafePerformIO or
                // unsafeInterleaveIO and hence inside an UPDATE_FRAME.
                //
                // So we want to make it so that if the enclosing
                // computation is resumed, we will re-execute the
                // transaction.  We therefore:
                //
                //   1. abort the current transaction
                //   2. replace the stack up to and including the
                //      atomically frame with a closure representing
                //      a call to "atomically x", where x is the code
                //      of the transaction.
                //   3. continue stripping the stack
                //
                StgTRecHeader *trec = tso->trec;
                StgTRecHeader *outer = trec->enclosing_trec;

                StgThunk *atomically;
                StgAtomicallyFrame *af = (StgAtomicallyFrame*)frame;
                debugTraceCap(DEBUG_stm, cap,
                              "raiseAsync: freezing atomically frame");
                stmAbortTransaction(cap, trec);
                stmFreeAbortedTRec(cap, trec);
                tso->trec = outer;

                atomically = (StgThunk*)allocate(cap,sizeofW(StgThunk)+1);
                TICK_ALLOC_SE_THK(1,0);
                SET_HDR(atomically,&stg_atomically_info,af->header.prof.ccs);
                atomically->payload[0] = af->code;

                // discard stack up to and including the ATOMICALLY_FRAME
                frame += sizeofW(StgAtomicallyFrame);
                sp = frame - 1;

                // replace the ATOMICALLY_FRAME with call to atomically#
                sp[0] = (W_)atomically;
                continue;
            }

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // CATCH frames within an atomically block: abort the
            // inner transaction and continue.  Eventually we will
            // hit the outer transaction that will get frozen (see
            // above).
            //
            // In this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.
        {
            StgTRecHeader *trec = tso->trec;
            StgTRecHeader *outer = trec->enclosing_trec;
            debugTraceCap(DEBUG_stm, cap,
                          "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);
            tso->trec = outer;
            break;
        }

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

done:
    IF_DEBUG(sanity, checkTSO(tso));

    // wake it up
    if (tso->why_blocked != NotBlocked) {
        tso->why_blocked = NotBlocked;
        appendToRunQueue(cap,tso);
    }

    return tso;
}