/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Asynchronous exceptions
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
#include "SMP.h"
#include "Schedule.h"
#include "Storage.h"
#include "Updates.h"
#include "STM.h"
#include "Sanity.h"

static void raiseAsync (Capability *cap,
                        StgTSO *tso,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgPtr stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void blockedThrowTo (StgTSO *source, StgTSO *target);

static void performBlockedException (Capability *cap,
                                     StgTSO *source, StgTSO *target);

/* -----------------------------------------------------------------------------
   throwToSingleThreaded

   This version of throwTo is safe to use if and only if one of the
   following holds:

   - !THREADED_RTS

   - all the other threads in the system are stopped (eg. during GC).

   - we surely own the target TSO (eg. we just took it from the
     run queue of the current capability, or we are running it).

   It doesn't cater for blocking the source thread until the exception
   has been raised.
   -------------------------------------------------------------------------- */
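
/* A hedged usage sketch (not a call site from this file): a caller
 * satisfying the third condition above, because it has just taken the
 * target from its own run queue, might do
 *
 *     StgTSO *t = popRunQueue(cap);          // we now surely own t
 *     throwToSingleThreaded(cap, t, exception);
 *
 * where popRunQueue() is assumed to come from Schedule.h and
 * 'exception' is some StgClosure* the caller already has in hand.
 */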

void
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
{
    throwToSingleThreaded_(cap, tso, exception, rtsFalse, NULL);
}

void
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically, StgPtr stop_here)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
}

void
suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here)
{
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
        return;
    }

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
}

/* -----------------------------------------------------------------------------
   throwTo

   This function may be used to throw an exception from one thread to
   another, during the course of normal execution. This is a tricky
   task: the target thread might be running on another CPU, or it
   may be blocked and could be woken up at any point by another CPU.
   We have some delicate synchronisation to do.

   There is a completely safe fallback scheme: it is always possible
   to just block the source TSO on the target TSO's blocked_exceptions
   queue. This queue is locked using lockTSO()/unlockTSO(). It is
   checked at regular intervals: before and after running a thread
   (schedule() and threadPaused() respectively), and just before GC
   (scheduleDoGC()). Activating a thread on this queue should be done
   using maybePerformBlockedException(): this is done in the context
   of the target thread, so the exception can be raised eagerly.

   This fallback scheme works even if the target thread is complete or
   killed: scheduleDoGC() will discover the blocked thread before the
   target is GC'd.

   Blocking the source thread on the target thread's blocked_exceptions
   queue is also employed when the target thread is currently blocking
   exceptions (ie. inside Control.Exception.block).

   We could use the safe fallback scheme exclusively, but that
   wouldn't be ideal: most calls to throwTo would block immediately,
   possibly until the next GC, which might require the deadlock
   detection mechanism to kick in. So we try to provide promptness
   wherever possible.

   We can promptly deliver the exception if the target thread is:

   - runnable, on the same Capability as the source thread (because
     we own the run queue and therefore the target thread).

   - blocked, and we can obtain exclusive access to it. Obtaining
     exclusive access to the thread depends on how it is blocked.

   We must also be careful not to trip over threadStackOverflow(),
   which might be moving the TSO to enlarge its stack.
   lockTSO()/unlockTSO() are used here too.

   Returns:

   THROWTO_SUCCESS    exception was raised, ok to continue

   THROWTO_BLOCKED    exception was not raised; block the source
                      thread then call throwToReleaseTarget() when
                      the source thread is properly tidied away.

   -------------------------------------------------------------------------- */
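
/* A hedged sketch of the caller protocol implied by the return values
 * above (the real call sites live elsewhere, e.g. in Schedule.c):
 *
 *     void *out;
 *     switch (throwTo(cap, source, target, exception, &out)) {
 *     case THROWTO_SUCCESS:
 *         // exception raised promptly; carry on
 *         break;
 *     case THROWTO_BLOCKED:
 *         // source is now on target's blocked_exceptions queue with
 *         // why_blocked == BlockedOnException; once the source thread
 *         // has been tidied away, release the lock still held on the
 *         // target:
 *         throwToReleaseTarget(out);
 *         break;
 *     }
 */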

nat
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception, // the exception closure
         /*[out]*/ void **out USED_IF_THREADS)
{
    StgWord status;

    // follow ThreadRelocated links in the target first
    while (target->what_next == ThreadRelocated) {
        target = target->link;
        // No, it might be a WHITEHOLE:
        // ASSERT(get_itbl(target)->type == TSO);
    }

    debugTrace(DEBUG_sched, "throwTo: from thread %d to thread %d",
               source->id, target->id);

#ifdef DEBUG
    if (traceClass(DEBUG_sched)) {
        debugTraceBegin("throwTo: target");
        printThreadStatus(target);
        debugTraceEnd();
    }
#endif

    goto check_target;
retry:
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
        /* if status==NotBlocked, and target->cap == cap, then
           we own this TSO and can raise the exception.

           How do we establish this condition? Very carefully.

           Let
               P = (status == NotBlocked)
               Q = (tso->cap == cap)

           Now, if P & Q are true, then the TSO is locked and owned by
           this capability. No other OS thread can steal it.

           If P==0 and Q==1: the TSO is blocked, but attached to this
           capability, and it can be stolen by another capability.

           If P==1 and Q==0: the TSO is runnable on another
           capability. At any time, the TSO may change from runnable
           to blocked and vice versa, while it remains owned by
           another capability.

           Suppose we test like this:

              p = P
              q = Q
              if (p && q) ...

           this is defeated by another capability stealing a blocked
           TSO from us to wake it up (Schedule.c:unblockOne()). The
           other thread is doing

              Q = 0
              P = 1

           assuming arbitrary reordering, we could see this
           interleaving:

              start: P==0 && Q==1
              P = 1
              p = P
              q = Q
              Q = 0
              if (p && q) ...

           so we need a memory barrier:

              p = P
              mb()
              q = Q
              if (p && q) ...

           this avoids the problematic case. There are other cases
           to consider, but this is the tricky one.

           Note that we must be sure that unblockOne() does the
           writes in the correct order: Q before P. The memory
           barrier ensures that if we have seen the write to P, we
           have also seen the write to Q.
        */
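
        /* A hedged sketch of the write ordering the note above demands
           of the waking side (Schedule.c:unblockOne()); the names are
           illustrative, not a quote of that function:

              tso->cap = new_cap;              // Q first
              write_barrier();
              tso->why_blocked = NotBlocked;   // then P
        */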
    {
        Capability *target_cap;

        write_barrier();
        target_cap = target->cap;
        if (target_cap == cap && (target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            // Otherwise, just block on the blocked_exceptions queue
            // of the target thread. The queue will get looked at
            // soon enough: it is checked before and after running a
            // thread, and during GC.
            lockTSO(target);

            // Avoid race with threadStackOverflow, which may have
            // just moved this TSO.
            if (target->what_next == ThreadRelocated) {
                unlockTSO(target);
                target = target->link;
                goto retry;
            }
            blockedThrowTo(source,target);
            *out = target;
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMVar:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure. In the threaded RTS it does.
        if (get_itbl(mvar)->type != MVAR) goto retry;

        info = lockClosure((StgClosure *)mvar);

        if (target->what_next == ThreadRelocated) {
            target = target->link;
            unlockClosure((StgClosure *)mvar,info);
            goto retry;
        }
        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if (target->why_blocked != BlockedOnMVar
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        // Blocking async exceptions and not in an interruptible
        // operation: we must queue the exception rather than raise it.
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            lockClosure((StgClosure *)target);
            blockedThrowTo(source,target);
            unlockClosure((StgClosure *)mvar, info);
            *out = target;
            return THROWTO_BLOCKED; // caller releases TSO
        } else {
            removeThreadFromMVarQueue(mvar, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        ACQUIRE_LOCK(&sched_mutex);
        // double checking the status after the memory barrier:
        if (target->why_blocked != BlockedOnBlackHole) {
            RELEASE_LOCK(&sched_mutex);
            goto retry;
        }

        if (target->flags & TSO_BLOCKEX) {
            lockTSO(target);
            blockedThrowTo(source,target);
            RELEASE_LOCK(&sched_mutex);
            *out = target;
            return THROWTO_BLOCKED; // caller releases TSO
        } else {
            removeThreadFromQueue(&blackhole_queue, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            RELEASE_LOCK(&sched_mutex);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnException:
    {
        StgTSO *target2;
        StgInfoTable *info;

        /*
          To obtain exclusive access to a BlockedOnException thread,
          we must call lockClosure() on the TSO on which it is blocked.
          Since the TSO might change underneath our feet, after we
          call lockClosure() we must check that

             (a) the closure we locked is actually a TSO
             (b) the original thread is still BlockedOnException,
             (c) the original thread is still blocked on the TSO we locked
          and (d) the target thread has not been relocated.

          We synchronise with threadStackOverflow() (which relocates
          threads) using lockClosure()/unlockClosure().
        */
        target2 = target->block_info.tso;

        info = lockClosure((StgClosure *)target2);
        if (info != &stg_TSO_info) {
            unlockClosure((StgClosure *)target2, info);
            goto retry;
        }
        if (target->what_next == ThreadRelocated) {
            target = target->link;
            unlockTSO(target2);
            goto retry;
        }
        if (target2->what_next == ThreadRelocated) {
            target->block_info.tso = target2->link;
            unlockTSO(target2);
            goto retry;
        }
        if (target->why_blocked != BlockedOnException
            || target->block_info.tso != target2) {
            unlockTSO(target2);
            goto retry;
        }

        /*
          Now we have exclusive rights to the target TSO...

          If it is blocking exceptions, add the source TSO to its
          blocked_exceptions queue. Otherwise, raise the exception.
        */
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            lockTSO(target);
            blockedThrowTo(source,target);
            unlockTSO(target2);
            *out = target;
            return THROWTO_BLOCKED;
        } else {
            removeThreadFromQueue(&target2->blocked_exceptions, target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockTSO(target2);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(source,target);
            *out = target;
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            unblockOne(cap, target);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall:
    case BlockedOnCCall_NoUnblockExc:
        // I don't think it's possible to acquire ownership of a
        // BlockedOnCCall thread. We just assume that the target
        // thread is blocking exceptions, and block on its
        // blocked_exceptions queue.
        lockTSO(target);
        blockedThrowTo(source,target);
        *out = target;
        return THROWTO_BLOCKED;

#ifndef THREADED_RTS
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(source,target);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    default:
        barf("throwTo: unrecognised why_blocked value");
    }
    barf("throwTo");
}

// Block a TSO on another TSO's blocked_exceptions queue.
// Precondition: we hold an exclusive lock on the target TSO (this is
// complex to achieve as there's no single lock on a TSO; see
// throwTo()).
static void
blockedThrowTo (StgTSO *source, StgTSO *target)
{
    debugTrace(DEBUG_sched, "throwTo: blocking on thread %d", target->id);
    source->link = target->blocked_exceptions;
    target->blocked_exceptions = source;
    dirtyTSO(target); // we modified the blocked_exceptions queue

    source->block_info.tso = target;
    write_barrier(); // throwTo_exception *must* be visible if BlockedOnException is.
    source->why_blocked = BlockedOnException;
}
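
/* After blockedThrowTo(), the source thread's stack is expected to
 * carry a throwto frame, pushed by the stg_block_throwto code (which
 * lives elsewhere in the RTS, not in this file). The ASSERTs in
 * performBlockedException() below document the expected layout:
 *
 *     source->sp[0] == (StgWord)&stg_block_throwto_info
 *     source->sp[1] == (StgWord)target      // the TSO thrown to
 *     source->sp[2] == (StgWord)exception   // the exception closure
 */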


#ifdef THREADED_RTS
void
throwToReleaseTarget (void *tso)
{
    unlockTSO((StgTSO *)tso);
}
#endif

/* -----------------------------------------------------------------------------
   Waking up threads blocked in throwTo

   There are two ways to do this: maybePerformBlockedException() will
   perform the throwTo() for the thread at the head of the queue
   immediately, and leave the other threads on the queue.
   maybePerformBlockedException() also checks the TSO_BLOCKEX flag
   before raising an exception.

   awakenBlockedExceptionQueue() will wake up all the threads in the
   queue, but not perform any throwTo() immediately. This might be
   more appropriate when the target thread is the one actually running
   (see Exception.cmm).
   -------------------------------------------------------------------------- */
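
/* A hedged sketch of where these are used, per the comments above (the
 * actual call sites are in Schedule.c and Exception.cmm):
 *
 *     // in the scheduler, before (and again after) running a thread:
 *     maybePerformBlockedException(cap, tso);
 *
 *     // when the running thread re-enables async exceptions, wake
 *     // everyone and let each blocked thread redo its throwTo():
 *     awakenBlockedExceptionQueue(cap, tso);
 */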

void
maybePerformBlockedException (Capability *cap, StgTSO *tso)
{
    StgTSO *source;

    if (tso->blocked_exceptions != END_TSO_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // Lock the TSO, this gives us exclusive access to the queue
        lockTSO(tso);

        // Check the queue again; it might have changed before we
        // locked it.
        if (tso->blocked_exceptions == END_TSO_QUEUE) {
            unlockTSO(tso);
            return;
        }

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
        source = tso->blocked_exceptions;
        performBlockedException(cap, source, tso);
        tso->blocked_exceptions = unblockOne_(cap, source,
                                              rtsFalse/*no migrate*/);
        unlockTSO(tso);
    }
}

void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
    if (tso->blocked_exceptions != END_TSO_QUEUE) {
        lockTSO(tso);
        awakenBlockedQueue(cap, tso->blocked_exceptions);
        tso->blocked_exceptions = END_TSO_QUEUE;
        unlockTSO(tso);
    }
}

static void
performBlockedException (Capability *cap, StgTSO *source, StgTSO *target)
{
    StgClosure *exception;

    ASSERT(source->why_blocked == BlockedOnException);
    ASSERT(source->block_info.tso->id == target->id);
    ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
    ASSERT(((StgTSO *)source->sp[1])->id == target->id);
    // check ids not pointers, because the thread might be relocated

    exception = (StgClosure *)source->sp[2];
    throwToSingleThreaded(cap, target, exception);
    source->sp += 3;
}

/* -----------------------------------------------------------------------------
   Remove a thread from blocking queues.

   This is for use when we raise an exception in another thread, which
   may be blocked.
   This has nothing to do with the UnblockThread event in GranSim. -- HWL
   -------------------------------------------------------------------------- */

#if defined(GRAN) || defined(PARALLEL_HASKELL)
/*
  NB: only the type of the blocking queue is different in GranSim and GUM
      the operations on the queue-elements are the same
      long live polymorphism!

  Locks: sched_mutex is held upon entry and exit.

*/
static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    StgBlockingQueueElement *t, **last;

    switch (tso->why_blocked) {

    case NotBlocked:
        return;  /* not blocked */

    case BlockedOnSTM:
        // Be careful: nothing to do here!  We tell the scheduler that the thread
        // is runnable and we leave it to the stack-walking code to abort the
        // transaction while unwinding the stack.  We should perhaps have a debugging
        // test to make sure that this really happens and that the 'zombie' transaction
        // does not get committed.
        goto done;

    case BlockedOnMVar:
        ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
        {
            StgBlockingQueueElement *last_tso = END_BQ_QUEUE;
            StgMVar *mvar = (StgMVar *)(tso->block_info.closure);

            last = (StgBlockingQueueElement **)&mvar->head;
            for (t = (StgBlockingQueueElement *)mvar->head;
                 t != END_BQ_QUEUE;
                 last = &t->link, last_tso = t, t = t->link) {
                if (t == (StgBlockingQueueElement *)tso) {
                    *last = (StgBlockingQueueElement *)tso->link;
                    if (mvar->tail == tso) {
                        mvar->tail = (StgTSO *)last_tso;
                    }
                    goto done;
                }
            }
            barf("removeFromQueues (MVAR): TSO not found");
        }

    case BlockedOnBlackHole:
        ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
        {
            StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);

            last = &bq->blocking_queue;
            for (t = bq->blocking_queue;
                 t != END_BQ_QUEUE;
                 last = &t->link, t = t->link) {
                if (t == (StgBlockingQueueElement *)tso) {
                    *last = (StgBlockingQueueElement *)tso->link;
                    goto done;
                }
            }
            barf("removeFromQueues (BLACKHOLE): TSO not found");
        }

    case BlockedOnException:
        {
            StgTSO *target = tso->block_info.tso;

            ASSERT(get_itbl(target)->type == TSO);

            while (target->what_next == ThreadRelocated) {
                target = target->link;
                ASSERT(get_itbl(target)->type == TSO);
            }

            last = (StgBlockingQueueElement **)&target->blocked_exceptions;
            for (t = (StgBlockingQueueElement *)target->blocked_exceptions;
                 t != END_BQ_QUEUE;
                 last = &t->link, t = t->link) {
                ASSERT(get_itbl(t)->type == TSO);
                if (t == (StgBlockingQueueElement *)tso) {
                    *last = (StgBlockingQueueElement *)tso->link;
                    goto done;
                }
            }
            barf("removeFromQueues (Exception): TSO not found");
        }

    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        {
            /* take TSO off blocked_queue */
            StgBlockingQueueElement *prev = NULL;
            for (t = (StgBlockingQueueElement *)blocked_queue_hd; t != END_BQ_QUEUE;
                 prev = t, t = t->link) {
                if (t == (StgBlockingQueueElement *)tso) {
                    if (prev == NULL) {
                        blocked_queue_hd = (StgTSO *)t->link;
                        if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
                            blocked_queue_tl = END_TSO_QUEUE;
                        }
                    } else {
                        prev->link = t->link;
                        if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
                            blocked_queue_tl = (StgTSO *)prev;
                        }
                    }
#if defined(mingw32_HOST_OS)
                    /* (Cooperatively) signal that the worker thread should abort
                     * the request.
                     */
                    abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
                    goto done;
                }
            }
            barf("removeFromQueues (I/O): TSO not found");
        }

    case BlockedOnDelay:
        {
            /* take TSO off sleeping_queue */
            StgBlockingQueueElement *prev = NULL;
            for (t = (StgBlockingQueueElement *)sleeping_queue; t != END_BQ_QUEUE;
                 prev = t, t = t->link) {
                if (t == (StgBlockingQueueElement *)tso) {
                    if (prev == NULL) {
                        sleeping_queue = (StgTSO *)t->link;
                    } else {
                        prev->link = t->link;
                    }
                    goto done;
                }
            }
            barf("removeFromQueues (delay): TSO not found");
        }

    default:
        barf("removeFromQueues");
    }

done:
    tso->link = END_TSO_QUEUE;
    tso->why_blocked = NotBlocked;
    tso->block_info.closure = NULL;
    pushOnRunQueue(cap,tso);
}
#else
static void
removeFromQueues(Capability *cap, StgTSO *tso)
{
    switch (tso->why_blocked) {

    case NotBlocked:
        return;

    case BlockedOnSTM:
        // Be careful: nothing to do here!  We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack.  We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get
        // committed.
        goto done;

    case BlockedOnMVar:
        removeThreadFromMVarQueue((StgMVar *)tso->block_info.closure, tso);
        goto done;

    case BlockedOnBlackHole:
        removeThreadFromQueue(&blackhole_queue, tso);
        goto done;

    case BlockedOnException:
        {
            StgTSO *target = tso->block_info.tso;

            // NO: when called by threadPaused(), we probably have this
            // TSO already locked (WHITEHOLEd) because we just placed
            // ourselves on its queue.
            // ASSERT(get_itbl(target)->type == TSO);

            while (target->what_next == ThreadRelocated) {
                target = target->link;
            }

            removeThreadFromQueue(&target->blocked_exceptions, tso);
            goto done;
        }

#if !defined(THREADED_RTS)
    case BlockedOnRead:
    case BlockedOnWrite:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(&blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);
#endif
        goto done;

    case BlockedOnDelay:
        removeThreadFromQueue(&sleeping_queue, tso);
        goto done;
#endif

    default:
        barf("removeFromQueues");
    }

done:
    tso->link = END_TSO_QUEUE;
    tso->why_blocked = NotBlocked;
    tso->block_info.closure = NULL;
    appendToRunQueue(cap,tso);

    // We might have just migrated this TSO to our Capability:
    if (tso->bound) {
        tso->bound->cap = cap;
    }
    tso->cap = cap;
}
#endif

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary. When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations? We create an
 * AP_STACK for every UpdateFrame on the stack. Entering one of these
 * AP_STACKs pushes everything from the corresponding update frame
 * upwards onto the stack. (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_STACK object.
 * Entering the next AP_STACK object pushes more onto the stack until we
 * reach the last AP_STACK object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack. In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * ToDo: in THREADED_RTS mode, this function is only safe if either
 * (a) we hold all the Capabilities (eg. in GC, or if there is only
 * one Capability), or (b) we own the Capability that the TSO is
 * currently blocked on or on the run queue of.
 *
 * -------------------------------------------------------------------------- */
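
/* A hedged illustration of the transformation described above, for a
 * stack with one update frame between the top of stack and the nearest
 * CATCH_FRAME (layout simplified; the stack grows downwards):
 *
 *     before raiseAsync:              after raiseAsync:
 *
 *     | ...live frames... | <- sp     | stg_enter_info     | <- sp
 *     | UPDATE_FRAME (u)  |           | raise(exception)   |
 *     | CATCH_FRAME       |           | CATCH_FRAME        |
 *
 * The live frames have been packed into a heap-allocated AP_STACK, and
 * u's updatee has been overwritten with an indirection to it, so the
 * suspended computation can later be resumed by entering the AP_STACK.
 */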

static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgPtr stop_here)
{
    StgRetInfoTable *info;
    StgPtr sp, frame;
    nat i;

    debugTrace(DEBUG_sched,
               "raising exception in thread %ld.", (long)tso->id);

    // mark it dirty; we're about to change its stack.
    dirtyTSO(tso);

    sp = tso->sp;

    // ASSUMES: the thread is not already complete or dead. Upper
    // layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    //
    if (sp[0] == (W_)&stg_enter_info) {
        sp++;
    } else {
        sp--;
        sp[0] = (W_)&stg_dummy_ret_closure;
    }

    frame = sp + 1;
    while (stop_here == NULL || frame < stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //    CATCH_FRAME.
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame. This closure becomes the "current
        //    closure". Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgAP_STACK * ap;
            nat words;

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            //
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocateLocal(cap,AP_STACK_sizeW(words));

            ap->size = words;
            ap->fun  = (StgClosure *)sp[0];
            sp++;
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;
            }

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);
            //         );

            // Replace the updatee with an indirection
            //
            // Warning: if we're in a loop, more than one update frame on
            // the stack may point to the same object. Be careful not to
            // overwrite an IND_OLDGEN in this case, because we'll screw
            // up the mutable lists. To be on the safe side, don't
            // overwrite any kind of indirection at all. See also
            // threadSqueezeStack in GC.c, where we have to make a similar
            // check.
            //
            if (!closure_IND(((StgUpdateFrame *)frame)->updatee)) {
                // revert the black hole
                UPD_IND_NOLOCK(((StgUpdateFrame *)frame)->updatee,
                               (StgClosure *)ap);
            }
            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            frame = sp + 1;
            continue; //no need to bump frame
        }

        case STOP_FRAME:
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);
            return;

        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.
            //
        {
#ifdef PROFILING
            StgCatchFrame *cf = (StgCatchFrame *)frame;
#endif
            StgThunk *raise;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
            raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.
            //
            sp = frame - 1;

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
            tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
            tso->sp = sp-1;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));
            return;
        }

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
#ifdef REG_R1
                tso->sp = frame;
#else
                // R1 is not a register: the return convention for IO in
                // this case puts the return value on the stack, so we
                // need to set up the stack to return to the atomically
                // frame properly...
                tso->sp = frame - 2;
                tso->sp[1] = (StgWord) &stg_NO_FINALIZER_closure; // why not?
                tso->sp[0] = (StgWord) &stg_ut_1_0_unreg_info;
#endif
                tso->what_next = ThreadRunGHC;
                return;
            }
            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_RETRY_FRAME:
            // If we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception. In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.
            debugTrace(DEBUG_stm,
                       "found atomically block delivering async exception");

            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = stmGetEnclosingTRec(trec);
            stmAbortTransaction(cap, trec);
            tso -> trec = outer;
            break;

        default:
            break;
        }

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);
    }

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);
}