/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2006
 *
 * Thread-related functionality
 *
 * --------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"

#include "Capability.h"
#include "Updates.h"
#include "Threads.h"
#include "Schedule.h"
#include "Trace.h"
#include "ThreadLabels.h"
#include "Messages.h"
#include "RaiseAsync.h"
#include "Printer.h"
#include "sm/Sanity.h"
#include "sm/Storage.h"

#include <string.h>
/* Next thread ID to allocate.
 */
static StgThreadID next_thread_id = 1;
/* The smallest stack size that makes any sense is:
 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
 * + 1                     (the closure to enter)
 * + 1                     (stg_ap_v_ret)
 * + 1                     (spare slot req'd by stg_ap_v_ret)
 *
 * A thread with this stack will bomb immediately with a stack
 * overflow, which will increase its stack size.
 */
#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 3)
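
/* Illustrative arithmetic only (the concrete values below are assumptions for
 * the example, not taken from this file): if RESERVED_STACK_WORDS were 21
 * words and sizeofW(StgStopFrame) were 1 word, MIN_STACK_WORDS would come to
 * 21 + 1 + 3 = 25 words, i.e. 200 bytes on a 64-bit target.  The "+ 3"
 * accounts for the three single-word items listed above: the closure to
 * enter, stg_ap_v_ret, and its spare slot. */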
/* ---------------------------------------------------------------------------
   Create a new thread.

   The new thread starts with the given stack size.  Before the
   scheduler can run, however, this thread needs to have a closure
   (and possibly some arguments) pushed on its stack.  See
   pushClosure() in Schedule.h.

   createGenThread() and createIOThread() (in SchedAPI.h) are
   convenient packaged versions of this function.
   ------------------------------------------------------------------------ */
StgTSO *
createThread(Capability *cap, W_ size)
{
    StgTSO *tso;
    StgStack *stack;
    W_ stack_size;

    /* sched_mutex is *not* required */

    /* catch ridiculously small stack sizes */
    if (size < MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO)) {
        size = MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO);
    }
    /* The size argument we are given includes all the per-thread
     * overheads:
     *
     *    - The TSO structure
     *    - The STACK header
     *
     * This is so that we can use a nice round power of 2 for the
     * default stack size (e.g. 1k), and if we're allocating lots of
     * threads back-to-back they'll fit nicely in a block.  It's a bit
     * of a benchmark hack, but it doesn't do any harm.
     */
    stack_size = round_to_mblocks(size - sizeofW(StgTSO));
    stack = (StgStack *)allocate(cap, stack_size);
    TICK_ALLOC_STACK(stack_size);
    SET_HDR(stack, &stg_STACK_info, cap->r.rCCCS);
    stack->stack_size   = stack_size - sizeofW(StgStack);
    stack->sp           = stack->stack + stack->stack_size;
    stack->dirty        = 1;
    tso = (StgTSO *)allocate(cap, sizeofW(StgTSO));
    SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);
    // Always start with the compiled code evaluator
    tso->what_next = ThreadRunGHC;

    tso->why_blocked  = NotBlocked;
    tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
    tso->bq = (StgBlockingQueue *)END_TSO_QUEUE;
    tso->flags = 0;
    tso->dirty = 1;
    tso->_link = END_TSO_QUEUE;

    tso->saved_errno = 0;
    tso->bound = NULL;
    tso->cap = cap;
    tso->stackobj       = stack;
    tso->tot_stack_size = stack->stack_size;

    ASSIGN_Int64((W_*)&(tso->alloc_limit), 0);
#ifdef PROFILING
    tso->prof.cccs = CCS_MAIN;
#endif
    // put a stop frame on the stack
    stack->sp -= sizeofW(StgStopFrame);
    SET_HDR((StgClosure*)stack->sp,
            (StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
    /* Link the new thread on the global thread list.
     */
    ACQUIRE_LOCK(&sched_mutex);
    tso->id = next_thread_id++;  // while we have the mutex
    tso->global_link = g0->threads;
    g0->threads = tso;
    RELEASE_LOCK(&sched_mutex);

    // ToDo: report the stack size in the event?
    traceEventCreateThread(cap, tso);

    return tso;
}
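
/* A minimal sketch (not part of this file) of how a wrapper in the style of
 * createIOThread() might package createThread(): it pushes an enter frame,
 * the closure to run, and a void-application return frame onto the new
 * thread's stack.  The exact frames pushed are an assumption made for
 * illustration; pushClosure() is assumed to push one word onto the TSO's
 * stack, as described for Schedule.h above.
 */
#if 0   /* illustration only, not compiled */
static StgTSO *
exampleCreateIOThread (Capability *cap, W_ stack_size, StgClosure *closure)
{
    StgTSO *t = createThread(cap, stack_size);
    pushClosure(t, (W_)&stg_ap_v_info);   // apply the result to a void argument
    pushClosure(t, (W_)closure);          // the IO action to run
    pushClosure(t, (W_)&stg_enter_info);  // enter the closure on top
    return t;
}
#endif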
/* ---------------------------------------------------------------------------
 * Comparing Thread ids.
 *
 * This is used from STG land in the implementation of the
 * instances of Eq/Ord for ThreadIds.
 * ------------------------------------------------------------------------ */
int
cmp_thread(StgPtr tso1, StgPtr tso2)
{
  StgThreadID id1 = ((StgTSO *)tso1)->id;
  StgThreadID id2 = ((StgTSO *)tso2)->id;

  if (id1 < id2) return (-1);
  if (id1 > id2) return 1;
  return 0;
}
/* ---------------------------------------------------------------------------
 * Fetching the ThreadID from an StgTSO.
 *
 * This is used in the implementation of Show for ThreadIds.
 * ------------------------------------------------------------------------ */
int
rts_getThreadId(StgPtr tso)
{
  return ((StgTSO *)tso)->id;
}
/* ---------------------------------------------------------------------------
 * Getting & setting the thread allocation limit
 * ------------------------------------------------------------------------ */
HsInt64 rts_getThreadAllocationCounter(StgPtr tso)
{
    // NB. doesn't take into account allocation in the current nursery
    // block, so it might be off by up to 4k.
    return PK_Int64((W_*)&(((StgTSO *)tso)->alloc_limit));
}

void rts_setThreadAllocationCounter(StgPtr tso, HsInt64 i)
{
    ASSIGN_Int64((W_*)&(((StgTSO *)tso)->alloc_limit), i);
}
void rts_enableThreadAllocationLimit(StgPtr tso)
{
    ((StgTSO *)tso)->flags |= TSO_ALLOC_LIMIT;
}

void rts_disableThreadAllocationLimit(StgPtr tso)
{
    ((StgTSO *)tso)->flags &= ~TSO_ALLOC_LIMIT;
}
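
/* Illustrative use only (a sketch, not from this file; "t" is a hypothetical
 * TSO pointer): giving a thread a roughly 1 MB allocation budget from C would
 * look like
 *
 *     rts_setThreadAllocationCounter((StgPtr)t, 1024 * 1024);
 *     rts_enableThreadAllocationLimit((StgPtr)t);
 *
 * The counter then counts down as the thread allocates; what happens when it
 * goes negative (delivery of an allocation-limit exception) is handled
 * elsewhere in the RTS and is stated here only as an assumption. */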
/* -----------------------------------------------------------------------------
   Remove a thread from a queue.
   Fails fatally if the TSO is not on the queue.
   -------------------------------------------------------------------------- */
rtsBool // returns True if we modified queue
removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
{
    StgTSO *t, *prev;

    prev = NULL;
    for (t = *queue; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                t->_link = END_TSO_QUEUE;
                return rtsFalse;
            } else {
                *queue = t->_link;
                t->_link = END_TSO_QUEUE;
                return rtsTrue;
            }
        }
    }
    barf("removeThreadFromQueue: not found");
}
rtsBool // returns True if we modified head or tail
removeThreadFromDeQueue (Capability *cap,
                         StgTSO **head, StgTSO **tail, StgTSO *tso)
{
    StgTSO *t, *prev;
    rtsBool flag = rtsFalse;

    prev = NULL;
    for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                flag = rtsFalse;
            } else {
                *head = t->_link;
                flag = rtsTrue;
            }
            t->_link = END_TSO_QUEUE;
            if (*tail == tso) {
                if (prev) {
                    *tail = prev;
                } else {
                    *tail = END_TSO_QUEUE;
                }
                return rtsTrue;
            } else {
                return flag;
            }
        }
    }
    barf("removeThreadFromDeQueue: not found");
}
/* ----------------------------------------------------------------------------
   tryWakeupThread()

   Attempt to wake up a thread.  tryWakeupThread is idempotent: it is
   always safe to call it too many times, but it is not safe in
   general to omit a call.
   ------------------------------------------------------------------------- */
void
tryWakeupThread (Capability *cap, StgTSO *tso)
{
    traceEventThreadWakeup (cap, tso, tso->cap->no);

#ifdef THREADED_RTS
    if (tso->cap != cap)
    {
        MessageWakeup *msg;
        msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
        SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
        msg->tso = tso;
        sendMessage(cap, tso->cap, (Message*)msg);
        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
                      (W_)tso->id, tso->cap->no);
        return;
    }
#endif

    switch (tso->why_blocked)
    {
    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
        if (tso->_link == END_TSO_QUEUE) {
            tso->block_info.closure = (StgClosure*)END_TSO_QUEUE;
            goto unblock;
        } else {
            return;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;

        i = lockClosure(tso->block_info.closure);
        unlockClosure(tso->block_info.closure, i);
        if (i != &stg_MSG_NULL_info) {
            debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
                          (W_)tso->id, tso->block_info.throwto->header.info);
            return;
        }

        // remove the block frame from the stack
        ASSERT(tso->stackobj->sp[0] == (StgWord)&stg_block_throwto_info);
        tso->stackobj->sp += 3;
        goto unblock;
    }

    case BlockedOnBlackHole:
    case BlockedOnSTM:
    case ThreadMigrating:
        goto unblock;

    default:
        // otherwise, do nothing
        return;
    }

unblock:
    // just run the thread now, if the BH is not really available,
    // we'll block again.
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap,tso);

    // We used to set the context switch flag here, which would
    // trigger a context switch a short time in the future (at the end
    // of the current nursery block).  The idea is that we have just
    // woken up a thread, so we may need to load-balance and migrate
    // threads to other CPUs.  On the other hand, setting the context
    // switch flag here unfairly penalises the current thread by
    // yielding its time slice too early.
    //
    // The synthetic benchmark nofib/smp/chan can be used to show the
    // difference quite clearly.

    // cap->context_switch = 1;
}
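
/* Illustrative scenario (a sketch, not taken from this file): if tso lives on
 * another capability, the code above never touches tso's state directly; it
 * allocates a MessageWakeup, points it at tso, and sendMessage()s it to
 * tso->cap.  The owning capability is assumed to call tryWakeupThread() again
 * for the same TSO when it drains its inbox, at which point the switch on
 * why_blocked runs locally.  Because the function is idempotent, the extra
 * call is harmless even if the thread was already woken by some other means
 * in the meantime. */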
/* ----------------------------------------------------------------------------
   migrateThread
   ------------------------------------------------------------------------- */
void
migrateThread (Capability *from, StgTSO *tso, Capability *to)
{
    traceEventMigrateThread (from, tso, to->no);
    // ThreadMigrating tells the target cap that it needs to be added to
    // the run queue when it receives the MSG_TRY_WAKEUP.
    tso->why_blocked = ThreadMigrating;
    tso->cap = to;
    tryWakeupThread(from, tso);
}
/* ----------------------------------------------------------------------------
   wakeBlockingQueue

   wakes up all the threads on the specified queue.
   ------------------------------------------------------------------------- */
void
wakeBlockingQueue(Capability *cap, StgBlockingQueue *bq)
{
    MessageBlackHole *msg;
    const StgInfoTable *i;

    ASSERT(bq->header.info == &stg_BLOCKING_QUEUE_DIRTY_info  ||
           bq->header.info == &stg_BLOCKING_QUEUE_CLEAN_info  );

    for (msg = bq->queue; msg != (MessageBlackHole*)END_TSO_QUEUE;
         msg = msg->link) {
        i = msg->header.info;
        if (i != &stg_IND_info) {
            ASSERT(i == &stg_MSG_BLACKHOLE_info);
            tryWakeupThread(cap,msg->tso);
        }
    }

    // overwrite the BQ with an indirection so it will be
    // collected at the next GC.
#if defined(DEBUG) && !defined(THREADED_RTS)
    // XXX FILL_SLOP, but not if THREADED_RTS because in that case
    // another thread might be looking at this BLOCKING_QUEUE and
    // checking the owner field at the same time.
    bq->bh = 0; bq->queue = 0; bq->owner = 0;
#endif
    OVERWRITE_INFO(bq, &stg_IND_info);
}
// If we update a closure that we know we BLACKHOLE'd, and the closure
// no longer points to the current TSO as its owner, then there may be
// an orphaned BLOCKING_QUEUE closure with blocked threads attached to
// it.  We therefore traverse the BLOCKING_QUEUEs attached to the
// current TSO to see if any can now be woken up.
static void
checkBlockingQueues (Capability *cap, StgTSO *tso)
{
    StgBlockingQueue *bq, *next;
    StgClosure *p;

    debugTraceCap(DEBUG_sched, cap,
                  "collision occurred; checking blocking queues for thread %ld",
                  (W_)tso->id);

    for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {
        next = bq->link;

        if (bq->header.info == &stg_IND_info) {
            // ToDo: could short it out right here, to avoid
            // traversing this IND multiple times.
            continue;
        }

        p = bq->bh;

        if (p->header.info != &stg_BLACKHOLE_info ||
            ((StgInd *)p)->indirectee != (StgClosure*)bq)
        {
            wakeBlockingQueue(cap,bq);
        }
    }
}
/* ----------------------------------------------------------------------------
   updateThunk

   Update a thunk with a value.  In order to do this, we need to know
   which TSO owns (or is evaluating) the thunk, in case we need to
   awaken any threads that are blocked on it.
   ------------------------------------------------------------------------- */
void
updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
{
    StgClosure *v;
    StgTSO *owner;
    const StgInfoTable *i;

    i = thunk->header.info;
    if (i != &stg_BLACKHOLE_info &&
        i != &stg_CAF_BLACKHOLE_info &&
        i != &__stg_EAGER_BLACKHOLE_info &&
        i != &stg_WHITEHOLE_info) {
        updateWithIndirection(cap, thunk, val);
        return;
    }

    v = ((StgInd*)thunk)->indirectee;

    updateWithIndirection(cap, thunk, val);

    // sometimes the TSO is locked when we reach here, so its header
    // might be WHITEHOLE.  Hence check for the correct owner using
    // pointer equality first.
    if ((StgTSO*)v == tso) {
        return;
    }

    i = v->header.info;
    if (i == &stg_TSO_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    if (i != &stg_BLOCKING_QUEUE_CLEAN_info &&
        i != &stg_BLOCKING_QUEUE_DIRTY_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    owner = ((StgBlockingQueue*)v)->owner;

    if (owner != tso) {
        checkBlockingQueues(cap, tso);
    } else {
        wakeBlockingQueue(cap, (StgBlockingQueue*)v);
    }
}
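
/* Illustrative scenario (a sketch, not taken from this file): thread A starts
 * evaluating a thunk and blackholes it; thread B enters the same thunk and
 * blocks, attaching a MSG_BLACKHOLE to a BLOCKING_QUEUE whose owner is A.
 * When A finally calls updateThunk(), the indirectee is that BLOCKING_QUEUE,
 * its owner matches A, and wakeBlockingQueue() above releases B.  If the
 * owner had been some other thread, A would instead scan its own tso->bq list
 * via checkBlockingQueues() for orphaned queues. */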
/* ---------------------------------------------------------------------------
 * rtsSupportsBoundThreads(): is the RTS built to support bound threads?
 * used by Control.Concurrent for error checking.
 * ------------------------------------------------------------------------- */
HsBool
rtsSupportsBoundThreads(void)
{
#if defined(THREADED_RTS)
  return HS_BOOL_TRUE;
#else
  return HS_BOOL_FALSE;
#endif
}
/* ---------------------------------------------------------------------------
 * isThreadBound(tso): check whether tso is bound to an OS thread.
 * ------------------------------------------------------------------------- */
StgBool
isThreadBound(StgTSO* tso USED_IF_THREADS)
{
#if defined(THREADED_RTS)
  return (tso->bound != NULL);
#endif
  return rtsFalse;
}
/* -----------------------------------------------------------------------------
   Stack overflow

   If the thread has reached its maximum stack size, then raise the
   StackOverflow exception in the offending thread.  Otherwise
   relocate the TSO into a larger chunk of memory and adjust its stack
   size appropriately.
   -------------------------------------------------------------------------- */
void
threadStackOverflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    W_ chunk_size;

    IF_DEBUG(sanity,checkTSO(tso));
    if (RtsFlags.GcFlags.maxStkSize > 0
        && tso->tot_stack_size >= RtsFlags.GcFlags.maxStkSize) {
        // #3677: In a stack overflow situation, stack squeezing may
        // reduce the stack size, but we don't know whether it has been
        // reduced enough for the stack check to succeed if we try
        // again.  Fortunately stack squeezing is idempotent, so all we
        // need to do is record whether *any* squeezing happened.  If we
        // are at the stack's absolute -K limit, and stack squeezing
        // happened, then we try running the thread again.  The
        // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
        // squeezing happened or not.
        if (tso->flags & TSO_SQUEEZED) {
            return;
        }

        debugTrace(DEBUG_gc,
                   "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
                   (long)tso->id, tso, (long)tso->stackobj->stack_size,
                   RtsFlags.GcFlags.maxStkSize);
        IF_DEBUG(gc,
                 /* If we're debugging, just print out the top of the stack */
                 printStackChunk(tso->stackobj->sp,
                                 stg_min(tso->stackobj->stack + tso->stackobj->stack_size,
                                         tso->stackobj->sp+64)));

        // Note [Throw to self when masked], also #767 and #8303.
        throwToSelf(cap, tso, (StgClosure *)stackOverflow_closure);
        return;
    }
    // We also want to avoid enlarging the stack if squeezing has
    // already released some of it.  However, we don't want to get into
    // a pathological situation where a thread has a nearly full stack
    // (near its current limit, but not near the absolute -K limit),
    // keeps allocating a little bit, squeezing removes a little bit,
    // and then it runs again.  So to avoid this, if we squeezed *and*
    // there is still less than BLOCK_SIZE_W words free, then we enlarge
    // the stack anyway.
    //
    // NB: This reasoning only applies if the stack has been squeezed;
    // if no squeezing has occurred, then BLOCK_SIZE_W free space does
    // not mean there is enough stack to run; the thread may have
    // requested a large amount of stack (see below).  If the amount
    // we squeezed is not enough to run the thread, we'll come back
    // here (no squeezing will have occurred and thus we'll enlarge the
    // stack anyway).
    //
    if ((tso->flags & TSO_SQUEEZED) &&
        ((W_)(tso->stackobj->sp - tso->stackobj->stack) >= BLOCK_SIZE_W)) {
        return;
    }
    old_stack = tso->stackobj;

    // If we used less than half of the previous stack chunk, then we
    // must have failed a stack check for a large amount of stack.  In
    // this case we allocate a double-sized chunk to try to
    // accommodate the large stack request.  If that also fails, the
    // next chunk will be 4x normal size, and so on.
    //
    // It would be better to have the mutator tell us how much stack
    // was needed, as we do with heap allocations, but this works for
    // now.
    //
    if (old_stack->sp > old_stack->stack + old_stack->stack_size / 2)
    {
        chunk_size = stg_max(2 * (old_stack->stack_size + sizeofW(StgStack)),
                             RtsFlags.GcFlags.stkChunkSize);
    }
    else
    {
        chunk_size = RtsFlags.GcFlags.stkChunkSize;
    }
    debugTraceCap(DEBUG_sched, cap,
                  "allocating new stack chunk of size %d bytes",
                  chunk_size * sizeof(W_));

    // Charge the current thread for allocating stack.  Stack usage is
    // non-deterministic, because the chunk boundaries might vary from
    // run to run, but accounting for this is better than not
    // accounting for it, since a deep recursion will otherwise not be
    // subject to allocation limits.
    cap->r.rCurrentTSO = tso;
    new_stack = (StgStack*) allocate(cap, chunk_size);
    cap->r.rCurrentTSO = NULL;

    SET_HDR(new_stack, &stg_STACK_info, old_stack->header.prof.ccs);
    TICK_ALLOC_STACK(chunk_size);

    new_stack->dirty = 0; // begin clean, we'll mark it dirty below
    new_stack->stack_size = chunk_size - sizeofW(StgStack);
    new_stack->sp = new_stack->stack + new_stack->stack_size;

    tso->tot_stack_size += new_stack->stack_size;
    {
        StgWord *sp;
        W_ chunk_words, size;

        // find the boundary of the chunk of old stack we're going to
        // copy to the new stack.  We skip over stack frames until we
        // reach the smaller of
        //
        //    * the chunk buffer size (+RTS -kb)
        //    * the end of the old stack
        //
        for (sp = old_stack->sp;
             sp < stg_min(old_stack->sp + RtsFlags.GcFlags.stkChunkBufferSize,
                          old_stack->stack + old_stack->stack_size); )
        {
            size = stack_frame_sizeW((StgClosure*)sp);

            // if including this frame would exceed the size of the
            // new stack (taking into account the underflow frame),
            // then stop at the previous frame.
            if (sp + size > old_stack->stack + (new_stack->stack_size -
                                                sizeofW(StgUnderflowFrame))) {
                break;
            }
            sp += size;
        }
== old_stack
->stack
+ old_stack
->stack_size
) {
652 // the old stack chunk is now empty, so we do *not* insert
653 // an underflow frame pointing back to it. There are two
654 // cases: either the old stack chunk was the last one, in
655 // which case it ends with a STOP_FRAME, or it is not the
656 // last one, and it already ends with an UNDERFLOW_FRAME
657 // pointing to the previous chunk. In the latter case, we
658 // will copy the UNDERFLOW_FRAME into the new stack chunk.
659 // In both cases, the old chunk will be subsequently GC'd.
661 // With the default settings, -ki1k -kb1k, this means the
662 // first stack chunk will be discarded after the first
663 // overflow, being replaced by a non-moving 32k chunk.
666 new_stack
->sp
-= sizeofW(StgUnderflowFrame
);
667 frame
= (StgUnderflowFrame
*)new_stack
->sp
;
668 frame
->info
= &stg_stack_underflow_frame_info
;
669 frame
->next_chunk
= old_stack
;
        // copy the stack chunk between tso->sp and sp to
        //   new_tso->sp + (tso->sp - sp)
        chunk_words = sp - old_stack->sp;

        memcpy(/* dest   */ new_stack->sp - chunk_words,
               /* source */ old_stack->sp,
               /* size   */ chunk_words * sizeof(W_));

        old_stack->sp += chunk_words;
        new_stack->sp -= chunk_words;
    }
    tso->stackobj = new_stack;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    IF_DEBUG(sanity,checkTSO(tso));
    // IF_DEBUG(scheduler,printTSO(new_tso));
}
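
/* Worked example (illustrative only, not from this file): suppose the old
 * chunk holds S words of stack.  If the thread had used more than half of it
 * when the check failed, the overflow is "ordinary" and the new chunk is
 * simply RtsFlags.GcFlags.stkChunkSize words.  If it had used less than half,
 * the check must have failed for a single large request, so the new chunk is
 * at least 2*(S + sizeofW(StgStack)) words; a repeat failure doubles again,
 * giving the 2x, 4x, ... progression described in the comments above.  The
 * comments above also cite -ki1k -kb1k as the default initial and buffer
 * sizes. */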
/* ---------------------------------------------------------------------------
   Stack underflow - called from the stg_stack_underflow_info frame
   ------------------------------------------------------------------------ */
W_ // returns offset to the return address
threadStackUnderflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    W_ retvals;

    debugTraceCap(DEBUG_sched, cap, "stack underflow");

    old_stack = tso->stackobj;

    frame = (StgUnderflowFrame*)(old_stack->stack + old_stack->stack_size
                                 - sizeofW(StgUnderflowFrame));
    ASSERT(frame->info == &stg_stack_underflow_frame_info);

    new_stack = (StgStack*)frame->next_chunk;
    tso->stackobj = new_stack;

    retvals = (P_)frame - old_stack->sp;
    if (retvals != 0)
    {
        // we have some return values to copy to the old stack
        if ((W_)(new_stack->sp - new_stack->stack) < retvals)
        {
            barf("threadStackUnderflow: not enough space for return values");
        }

        new_stack->sp -= retvals;

        memcpy(/* dest */ new_stack->sp,
               /* src  */ old_stack->sp,
               /* size */ retvals * sizeof(W_));
    }

    // empty the old stack.  The GC may still visit this object
    // because it is on the mutable list.
    old_stack->sp = old_stack->stack + old_stack->stack_size;

    // restore the stack parameters, and update tot_stack_size
    tso->tot_stack_size -= old_stack->stack_size;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    return retvals;
}
/* ----------------------------------------------------------------------------
 * Debugging: why is a thread blocked
 * ------------------------------------------------------------------------- */

#if defined(DEBUG)
void
printThreadBlockage(StgTSO *tso)
{
  switch (tso->why_blocked) {
#if defined(mingw32_HOST_OS)
  case BlockedOnDoProc:
    debugBelch("is blocked on proc (request: %u)", tso->block_info.async_result->reqID);
    break;
#endif
#if !defined(THREADED_RTS)
  case BlockedOnRead:
    debugBelch("is blocked on read from fd %d", (int)(tso->block_info.fd));
    break;
  case BlockedOnWrite:
    debugBelch("is blocked on write to fd %d", (int)(tso->block_info.fd));
    break;
  case BlockedOnDelay:
    debugBelch("is blocked until %ld", (long)(tso->block_info.target));
    break;
#endif
  case BlockedOnMVar:
    debugBelch("is blocked on an MVar @ %p", tso->block_info.closure);
    break;
  case BlockedOnMVarRead:
    debugBelch("is blocked on atomic MVar read @ %p", tso->block_info.closure);
    break;
  case BlockedOnBlackHole:
    debugBelch("is blocked on a black hole %p",
               ((StgBlockingQueue*)tso->block_info.bh->bh));
    break;
  case BlockedOnMsgThrowTo:
    debugBelch("is blocked on a throwto message");
    break;
  case NotBlocked:
    debugBelch("is not blocked");
    break;
  case ThreadMigrating:
    debugBelch("is runnable, but not on the run queue");
    break;
  case BlockedOnCCall:
    debugBelch("is blocked on an external call");
    break;
  case BlockedOnCCall_Interruptible:
    debugBelch("is blocked on an external call (but may be interrupted)");
    break;
  case BlockedOnSTM:
    debugBelch("is blocked on an STM operation");
    break;
  default:
    barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%d)",
         tso->why_blocked, tso->id, tso);
  }
}
void
printThreadStatus(StgTSO *t)
{
  debugBelch("\tthread %4lu @ %p ", (unsigned long)t->id, (void *)t);
  {
    void *label = lookupThreadLabel(t->id);
    if (label) debugBelch("[\"%s\"] ",(char *)label);
  }
  switch (t->what_next) {
  case ThreadKilled:
    debugBelch("has been killed");
    break;
  case ThreadComplete:
    debugBelch("has completed");
    break;
  default:
    printThreadBlockage(t);
  }
  if (t->dirty) {
    debugBelch(" (TSO_DIRTY)");
  }
  debugBelch("\n");
}
void
printAllThreads(void)
{
  StgTSO *t, *next;
  nat i, g;
  Capability *cap;

  debugBelch("all threads:\n");

  for (i = 0; i < n_capabilities; i++) {
      cap = capabilities[i];
      debugBelch("threads on capability %d:\n", cap->no);
      for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
          printThreadStatus(t);
      }
  }

  debugBelch("other threads:\n");
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
      if (t->why_blocked != NotBlocked) {
          printThreadStatus(t);
      }
      next = t->global_link;
    }
  }
}
// useful from gdb
void
printThreadQueue(StgTSO *t)
{
    nat i = 0;
    for (; t != END_TSO_QUEUE; t = t->_link) {
        printThreadStatus(t);
        i++;
    }
    debugBelch("%d threads on queue\n", i);
}

#endif /* DEBUG */