rts/Threads.c
1 /* ---------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 2006
4 *
5 * Thread-related functionality
6 *
7 * --------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11
12 #include "Capability.h"
13 #include "Updates.h"
14 #include "Threads.h"
15 #include "STM.h"
16 #include "Schedule.h"
17 #include "Trace.h"
18 #include "ThreadLabels.h"
19 #include "Updates.h"
20 #include "Messages.h"
21 #include "RaiseAsync.h"
22 #include "Prelude.h"
23 #include "Printer.h"
24 #include "sm/Sanity.h"
25 #include "sm/Storage.h"
26
27 #include <string.h>
28
29 /* Next thread ID to allocate.
30 * LOCK: sched_mutex
31 */
32 static StgThreadID next_thread_id = 1;
33
34 /* The smallest stack size that makes any sense is:
35 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
36 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
37 * + 1 (the closure to enter)
38 * + 1 (stg_ap_v_ret)
39 * + 1 (spare slot req'd by stg_ap_v_ret)
40 *
41 * A thread with this stack will bomb immediately with a stack
42 * overflow, which will increase its stack size.
43 */
44 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 3)
45
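/* A minimal sketch (not compiled here) of what that lower bound amounts to:
 * the smallest request createThread() below will honour is the TSO header,
 * the STACK header, and MIN_STACK_WORDS of usable stack.
 */
#if 0   /* illustrative only */
W_ min_request = sizeofW(StgTSO) + sizeofW(StgStack) + MIN_STACK_WORDS;
/* createThread(cap, size) rounds any smaller 'size' up to min_request. */
#endif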
46 /* ---------------------------------------------------------------------------
47 Create a new thread.
48
49 The new thread starts with the given stack size. Before the
50 scheduler can run, however, this thread needs to have a closure
51 (and possibly some arguments) pushed on its stack. See
52 pushClosure() in Schedule.h.
53
54 createGenThread() and createIOThread() (in SchedAPI.h) are
55 convenient packaged versions of this function.
56
57 currently pri (priority) is only used in a GRAN setup -- HWL
58 ------------------------------------------------------------------------ */
59 StgTSO *
60 createThread(Capability *cap, W_ size)
61 {
62 StgTSO *tso;
63 StgStack *stack;
64 nat stack_size;
65
66 /* sched_mutex is *not* required */
67
68 /* catch ridiculously small stack sizes */
69 if (size < MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO)) {
70 size = MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO);
71 }
72
73 /* The size argument we are given includes all the per-thread
74 * overheads:
75 *
76 * - The TSO structure
77 * - The STACK header
78 *
79 * This is so that we can use a nice round power of 2 for the
80 * default stack size (e.g. 1k), and if we're allocating lots of
81 * threads back-to-back they'll fit nicely in a block. It's a bit
82 * of a benchmark hack, but it doesn't do any harm.
83 */
84 stack_size = round_to_mblocks(size - sizeofW(StgTSO));
85 stack = (StgStack *)allocate(cap, stack_size);
86 TICK_ALLOC_STACK(stack_size);
87 SET_HDR(stack, &stg_STACK_info, CCS_SYSTEM);
88 stack->stack_size = stack_size - sizeofW(StgStack);
89 stack->sp = stack->stack + stack->stack_size;
90 stack->dirty = 1;
91
92 tso = (StgTSO *)allocate(cap, sizeofW(StgTSO));
93 TICK_ALLOC_TSO();
94 SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);
95
96 // Always start with the compiled code evaluator
97 tso->what_next = ThreadRunGHC;
98 tso->why_blocked = NotBlocked;
99 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
100 tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
101 tso->bq = (StgBlockingQueue *)END_TSO_QUEUE;
102 tso->flags = 0;
103 tso->dirty = 1;
104 tso->_link = END_TSO_QUEUE;
105
106 tso->saved_errno = 0;
107 tso->bound = NULL;
108 tso->cap = cap;
109
110 tso->stackobj = stack;
111 tso->tot_stack_size = stack->stack_size;
112
113 tso->trec = NO_TREC;
114
115 #ifdef PROFILING
116 tso->prof.cccs = CCS_MAIN;
117 #endif
118
119 // put a stop frame on the stack
120 stack->sp -= sizeofW(StgStopFrame);
121 SET_HDR((StgClosure*)stack->sp,
122 (StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
123
124 /* Link the new thread on the global thread list.
125 */
126 ACQUIRE_LOCK(&sched_mutex);
127 tso->id = next_thread_id++; // while we have the mutex
128 tso->global_link = g0->threads;
129 g0->threads = tso;
130 RELEASE_LOCK(&sched_mutex);
131
132 // ToDo: report the stack size in the event?
133 traceEventCreateThread(cap, tso);
134
135 return tso;
136 }
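/* A sketch (not compiled here) of how a caller typically uses the result:
 * allocate a thread with one of the wrappers mentioned above and hand it to
 * the scheduler.  spawnWorker is a hypothetical helper, not RTS API.
 */
#if 0   /* illustrative only */
static void
spawnWorker (Capability *cap, StgClosure *closure)
{
    StgTSO *tso;

    // createIOThread() (see SchedAPI.h) pushes 'closure' plus the frames
    // needed to run it as an IO action on the new thread's stack.
    tso = createIOThread(cap, RtsFlags.GcFlags.initialStkSize, closure);

    // make it runnable; the scheduler picks it up from cap's run queue
    scheduleThread(cap, tso);
}
#endif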
137
138 /* ---------------------------------------------------------------------------
139 * Comparing Thread ids.
140 *
141 * This is used from STG land in the implementation of the
142 * instances of Eq/Ord for ThreadIds.
143 * ------------------------------------------------------------------------ */
144
145 int
146 cmp_thread(StgPtr tso1, StgPtr tso2)
147 {
148 StgThreadID id1 = ((StgTSO *)tso1)->id;
149 StgThreadID id2 = ((StgTSO *)tso2)->id;
150
151 if (id1 < id2) return (-1);
152 if (id1 > id2) return 1;
153 return 0;
154 }
155
156 /* ---------------------------------------------------------------------------
157 * Fetching the ThreadID from an StgTSO.
158 *
159 * This is used in the implementation of Show for ThreadIds.
160 * ------------------------------------------------------------------------ */
161 int
162 rts_getThreadId(StgPtr tso)
163 {
164 return ((StgTSO *)tso)->id;
165 }
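/* A sketch (not compiled here) of using these entry points from C; t1 and
 * t2 are hypothetical StgTSO pointers.  Thread ids are allocated in
 * increasing order, so a smaller id means the thread was created earlier.
 */
#if 0   /* illustrative only */
debugBelch("thread %d is %s thread %d\n",
           rts_getThreadId((StgPtr)t1),
           cmp_thread((StgPtr)t1, (StgPtr)t2) < 0 ? "older than" : "not older than",
           rts_getThreadId((StgPtr)t2));
#endif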
166
167 /* -----------------------------------------------------------------------------
168 Remove a thread from a queue.
169 Fails fatally if the TSO is not on the queue.
170 -------------------------------------------------------------------------- */
171
172 rtsBool // returns True if we modified the head of the queue
173 removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
174 {
175 StgTSO *t, *prev;
176
177 prev = NULL;
178 for (t = *queue; t != END_TSO_QUEUE; prev = t, t = t->_link) {
179 if (t == tso) {
180 if (prev) {
181 setTSOLink(cap,prev,t->_link);
182 t->_link = END_TSO_QUEUE;
183 return rtsFalse;
184 } else {
185 *queue = t->_link;
186 t->_link = END_TSO_QUEUE;
187 return rtsTrue;
188 }
189 }
190 }
191 barf("removeThreadFromQueue: not found");
192 }
193
194 rtsBool // returns True if we modified head or tail
195 removeThreadFromDeQueue (Capability *cap,
196 StgTSO **head, StgTSO **tail, StgTSO *tso)
197 {
198 StgTSO *t, *prev;
199 rtsBool flag = rtsFalse;
200
201 prev = NULL;
202 for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
203 if (t == tso) {
204 if (prev) {
205 setTSOLink(cap,prev,t->_link);
206 flag = rtsFalse;
207 } else {
208 *head = t->_link;
209 flag = rtsTrue;
210 }
211 t->_link = END_TSO_QUEUE;
212 if (*tail == tso) {
213 if (prev) {
214 *tail = prev;
215 } else {
216 *tail = END_TSO_QUEUE;
217 }
218 return rtsTrue;
219 } else {
220 return flag;
221 }
222 }
223 }
224 barf("removeThreadFromDeQueue: not found");
225 }
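/* A sketch (not compiled here) of the typical call pattern: deleting a
 * thread from a capability's run queue, which keeps both a head and a tail
 * pointer.  The return value says whether either end was updated.
 */
#if 0   /* illustrative only */
removeThreadFromDeQueue(cap, &cap->run_queue_hd, &cap->run_queue_tl, tso);
#endif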
226
227 /* ----------------------------------------------------------------------------
228 tryWakeupThread()
229
230 Attempt to wake up a thread. tryWakeupThread is idempotent: it is
231 always safe to call it too many times, but it is not safe in
232 general to omit a call.
233
234 ------------------------------------------------------------------------- */
235
236 void
237 tryWakeupThread (Capability *cap, StgTSO *tso)
238 {
239 traceEventThreadWakeup (cap, tso, tso->cap->no);
240
241 #ifdef THREADED_RTS
242 if (tso->cap != cap)
243 {
244 MessageWakeup *msg;
245 msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
246 SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
247 msg->tso = tso;
248 sendMessage(cap, tso->cap, (Message*)msg);
249 debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
250 (W_)tso->id, tso->cap->no);
251 return;
252 }
253 #endif
254
255 switch (tso->why_blocked)
256 {
257 case BlockedOnMVar:
258 {
259 if (tso->_link == END_TSO_QUEUE) {
260 tso->block_info.closure = (StgClosure*)END_TSO_QUEUE;
261 goto unblock;
262 } else {
263 return;
264 }
265 }
266
267 case BlockedOnMsgThrowTo:
268 {
269 const StgInfoTable *i;
270
271 i = lockClosure(tso->block_info.closure);
272 unlockClosure(tso->block_info.closure, i);
273 if (i != &stg_MSG_NULL_info) {
274 debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
275 (W_)tso->id, tso->block_info.throwto->header.info);
276 return;
277 }
278
279 // remove the block frame from the stack
280 ASSERT(tso->stackobj->sp[0] == (StgWord)&stg_block_throwto_info);
281 tso->stackobj->sp += 3;
282 goto unblock;
283 }
284
285 case BlockedOnBlackHole:
286 case BlockedOnSTM:
287 case ThreadMigrating:
288 goto unblock;
289
290 default:
291 // otherwise, do nothing
292 return;
293 }
294
295 unblock:
296 // just run the thread now; if the BH is not really available,
297 // we'll block again.
298 tso->why_blocked = NotBlocked;
299 appendToRunQueue(cap,tso);
300
301 // We used to set the context switch flag here, which would
302 // trigger a context switch a short time in the future (at the end
303 // of the current nursery block). The idea is that we have just
304 // woken up a thread, so we may need to load-balance and migrate
305 // threads to other CPUs. On the other hand, setting the context
306 // switch flag here unfairly penalises the current thread by
307 // yielding its time slice too early.
308 //
309 // The synthetic benchmark nofib/smp/chan can be used to show the
310 // difference quite clearly.
311
312 // cap->context_switch = 1;
313 }
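/* A sketch (not compiled here) of the idempotence claim above: waking a
 * thread that is already runnable falls through to the default case and
 * returns without touching the run queue, so a racy caller may wake twice.
 */
#if 0   /* illustrative only */
tryWakeupThread(cap, tso);
tryWakeupThread(cap, tso);   /* a no-op once why_blocked is NotBlocked */
#endif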
314
315 /* ----------------------------------------------------------------------------
316 migrateThread
317 ------------------------------------------------------------------------- */
318
319 void
320 migrateThread (Capability *from, StgTSO *tso, Capability *to)
321 {
322 traceEventMigrateThread (from, tso, to->no);
323 // ThreadMigrating tells the target cap that it needs to be added to
324 // the run queue when it receives the MSG_TRY_WAKEUP.
325 tso->why_blocked = ThreadMigrating;
326 tso->cap = to;
327 tryWakeupThread(from, tso);
328 }
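/* A load-balancing sketch (not compiled here) in the spirit of
 * schedulePushWork() in Schedule.c: hand a runnable, unbound thread to
 * another capability.  'free_cap' is a hypothetical idle Capability*.
 */
#if 0   /* illustrative only */
if (!emptyRunQueue(cap)) {
    StgTSO *t = popRunQueue(cap);
    if (t->bound == NULL && !(t->flags & TSO_LOCKED)) {
        migrateThread(cap, t, free_cap);
    } else {
        appendToRunQueue(cap, t);   /* bound/locked threads stay put */
    }
}
#endif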
329
330 /* ----------------------------------------------------------------------------
331 wakeBlockingQueue
332
333 Wakes up all the threads on the specified blocking queue.
334 ------------------------------------------------------------------------- */
335
336 void
337 wakeBlockingQueue(Capability *cap, StgBlockingQueue *bq)
338 {
339 MessageBlackHole *msg;
340 const StgInfoTable *i;
341
342 ASSERT(bq->header.info == &stg_BLOCKING_QUEUE_DIRTY_info ||
343 bq->header.info == &stg_BLOCKING_QUEUE_CLEAN_info );
344
345 for (msg = bq->queue; msg != (MessageBlackHole*)END_TSO_QUEUE;
346 msg = msg->link) {
347 i = msg->header.info;
348 if (i != &stg_IND_info) {
349 ASSERT(i == &stg_MSG_BLACKHOLE_info);
350 tryWakeupThread(cap,msg->tso);
351 }
352 }
353
354 // overwrite the BQ with an indirection so it will be
355 // collected at the next GC.
356 #if defined(DEBUG) && !defined(THREADED_RTS)
357 // XXX FILL_SLOP, but not if THREADED_RTS because in that case
358 // another thread might be looking at this BLOCKING_QUEUE and
359 // checking the owner field at the same time.
360 bq->bh = 0; bq->queue = 0; bq->owner = 0;
361 #endif
362 OVERWRITE_INFO(bq, &stg_IND_info);
363 }
364
365 // If we update a closure that we know we BLACKHOLE'd, and the closure
366 // no longer points to the current TSO as its owner, then there may be
367 // an orphaned BLOCKING_QUEUE closure with blocked threads attached to
368 // it. We therefore traverse the BLOCKING_QUEUEs attached to the
369 // current TSO to see if any can now be woken up.
370 void
371 checkBlockingQueues (Capability *cap, StgTSO *tso)
372 {
373 StgBlockingQueue *bq, *next;
374 StgClosure *p;
375
376 debugTraceCap(DEBUG_sched, cap,
377 "collision occurred; checking blocking queues for thread %ld",
378 (W_)tso->id);
379
380 for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {
381 next = bq->link;
382
383 if (bq->header.info == &stg_IND_info) {
384 // ToDo: could short it out right here, to avoid
385 // traversing this IND multiple times.
386 continue;
387 }
388
389 p = bq->bh;
390
391 if (p->header.info != &stg_BLACKHOLE_info ||
392 ((StgInd *)p)->indirectee != (StgClosure*)bq)
393 {
394 wakeBlockingQueue(cap,bq);
395 }
396 }
397 }
398
399 /* ----------------------------------------------------------------------------
400 updateThunk
401
402 Update a thunk with a value. In order to do this, we need to know
403 which TSO owns (or is evaluating) the thunk, in case we need to
404 awaken any threads that are blocked on it.
405 ------------------------------------------------------------------------- */
406
407 void
408 updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
409 {
410 StgClosure *v;
411 StgTSO *owner;
412 const StgInfoTable *i;
413
414 i = thunk->header.info;
415 if (i != &stg_BLACKHOLE_info &&
416 i != &stg_CAF_BLACKHOLE_info &&
417 i != &__stg_EAGER_BLACKHOLE_info &&
418 i != &stg_WHITEHOLE_info) {
419 updateWithIndirection(cap, thunk, val);
420 return;
421 }
422
423 v = ((StgInd*)thunk)->indirectee;
424
425 updateWithIndirection(cap, thunk, val);
426
427 // sometimes the TSO is locked when we reach here, so its header
428 // might be WHITEHOLE. Hence check for the correct owner using
429 // pointer equality first.
430 if ((StgTSO*)v == tso) {
431 return;
432 }
433
434 i = v->header.info;
435 if (i == &stg_TSO_info) {
436 checkBlockingQueues(cap, tso);
437 return;
438 }
439
440 if (i != &stg_BLOCKING_QUEUE_CLEAN_info &&
441 i != &stg_BLOCKING_QUEUE_DIRTY_info) {
442 checkBlockingQueues(cap, tso);
443 return;
444 }
445
446 owner = ((StgBlockingQueue*)v)->owner;
447
448 if (owner != tso) {
449 checkBlockingQueues(cap, tso);
450 } else {
451 wakeBlockingQueue(cap, (StgBlockingQueue*)v);
452 }
453 }
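/* A sketch (not compiled here) of the ownership test used above, factored
 * out: given the old indirectee of a BLACKHOLE, find the owning TSO.
 * blackHoleOwnerSketch is a hypothetical helper, not the RTS's own API.
 */
#if 0   /* illustrative only */
static StgTSO *
blackHoleOwnerSketch (StgClosure *v)
{
    const StgInfoTable *i = v->header.info;

    if (i == &stg_TSO_info) {
        return (StgTSO *)v;                      // owned directly by a TSO
    }
    if (i == &stg_BLOCKING_QUEUE_CLEAN_info ||
        i == &stg_BLOCKING_QUEUE_DIRTY_info) {
        return ((StgBlockingQueue *)v)->owner;   // owner recorded in the BQ
    }
    return NULL;   // locked (WHITEHOLE) or already updated; caller re-checks
}
#endif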
454
455 /* ---------------------------------------------------------------------------
456 * rtsSupportsBoundThreads(): is the RTS built to support bound threads?
457 * used by Control.Concurrent for error checking.
458 * ------------------------------------------------------------------------- */
459
460 HsBool
461 rtsSupportsBoundThreads(void)
462 {
463 #if defined(THREADED_RTS)
464 return HS_BOOL_TRUE;
465 #else
466 return HS_BOOL_FALSE;
467 #endif
468 }
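/* A sketch (not compiled here) of the kind of check Control.Concurrent
 * performs through this entry point; the error text is illustrative only.
 */
#if 0   /* illustrative only */
if (!rtsSupportsBoundThreads()) {
    errorBelch("this RTS was not built with -threaded; "
               "bound threads are not supported");
}
#endif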
469
470 /* ---------------------------------------------------------------------------
471 * isThreadBound(tso): check whether tso is bound to an OS thread.
472 * ------------------------------------------------------------------------- */
473
474 StgBool
475 isThreadBound(StgTSO* tso USED_IF_THREADS)
476 {
477 #if defined(THREADED_RTS)
478 return (tso->bound != NULL);
479 #endif
480 return rtsFalse;
481 }
482
483 /* -----------------------------------------------------------------------------
484 Stack overflow
485
486 If the thread has reached its maximum stack size, then raise the
487 StackOverflow exception in the offending thread. Otherwise
488 relocate the TSO into a larger chunk of memory and adjust its stack
489 size appropriately.
490 -------------------------------------------------------------------------- */
491
492 void
493 threadStackOverflow (Capability *cap, StgTSO *tso)
494 {
495 StgStack *new_stack, *old_stack;
496 StgUnderflowFrame *frame;
497 W_ chunk_size;
498
499 IF_DEBUG(sanity,checkTSO(tso));
500
501 if (tso->tot_stack_size >= RtsFlags.GcFlags.maxStkSize
502 && !(tso->flags & TSO_BLOCKEX)) {
503 // NB. never raise a StackOverflow exception if the thread is
504 // inside Control.Exception.block. It is impractical to protect
505 // against stack overflow exceptions, since virtually anything
506 // can raise one (even 'catch'), so this is the only sensible
507 // thing to do here. See bug #767.
508 //
509
510 if (tso->flags & TSO_SQUEEZED) {
511 return;
512 }
513 // #3677: In a stack overflow situation, stack squeezing may
514 // reduce the stack size, but we don't know whether it has been
515 // reduced enough for the stack check to succeed if we try
516 // again. Fortunately stack squeezing is idempotent, so all we
517 // need to do is record whether *any* squeezing happened. If we
518 // are at the stack's absolute -K limit, and stack squeezing
519 // happened, then we try running the thread again. The
520 // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
521 // squeezing happened or not.
522
523 debugTrace(DEBUG_gc,
524 "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
525 (long)tso->id, tso, (long)tso->stackobj->stack_size,
526 RtsFlags.GcFlags.maxStkSize);
527 IF_DEBUG(gc,
528 /* If we're debugging, just print out the top of the stack */
529 printStackChunk(tso->stackobj->sp,
530 stg_min(tso->stackobj->stack + tso->stackobj->stack_size,
531 tso->stackobj->sp+64)));
532
533 // Send this thread the StackOverflow exception
534 throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
535 }
536
537
538 // We also want to avoid enlarging the stack if squeezing has
539 // already released some of it. However, we don't want to get into
540 // a pathological situation where a thread has a nearly full stack
541 // (near its current limit, but not near the absolute -K limit),
542 // keeps allocating a little bit, squeezing removes a little bit,
543 // and then it runs again. So to avoid this, if we squeezed *and*
544 // there is still less than BLOCK_SIZE_W words free, then we enlarge
545 // the stack anyway.
546 if ((tso->flags & TSO_SQUEEZED) &&
547 ((W_)(tso->stackobj->sp - tso->stackobj->stack) >= BLOCK_SIZE_W)) {
548 return;
549 }
550
551 old_stack = tso->stackobj;
552
553 // If we used less than half of the previous stack chunk, then we
554 // must have failed a stack check for a large amount of stack. In
555 // this case we allocate a double-sized chunk to try to
556 // accommodate the large stack request. If that also fails, the
557 // next chunk will be 4x normal size, and so on.
558 //
559 // It would be better to have the mutator tell us how much stack
560 // was needed, as we do with heap allocations, but this works for
561 // now.
562 //
563 if (old_stack->sp > old_stack->stack + old_stack->stack_size / 2)
564 {
565 chunk_size = stg_max(2 * (old_stack->stack_size + sizeofW(StgStack)),
566 RtsFlags.GcFlags.stkChunkSize);
567 }
568 else
569 {
570 chunk_size = RtsFlags.GcFlags.stkChunkSize;
571 }
572
573 debugTraceCap(DEBUG_sched, cap,
574 "allocating new stack chunk of size %d bytes",
575 chunk_size * sizeof(W_));
576
577 new_stack = (StgStack*) allocate(cap, chunk_size);
578 SET_HDR(new_stack, &stg_STACK_info, CCS_SYSTEM);
579 TICK_ALLOC_STACK(chunk_size);
580
581 new_stack->dirty = 0; // begin clean, we'll mark it dirty below
582 new_stack->stack_size = chunk_size - sizeofW(StgStack);
583 new_stack->sp = new_stack->stack + new_stack->stack_size;
584
585 tso->tot_stack_size += new_stack->stack_size;
586
587 {
588 StgWord *sp;
589 W_ chunk_words, size;
590
591 // find the boundary of the chunk of old stack we're going to
592 // copy to the new stack. We skip over stack frames until we
593 // reach the smaller of
594 //
595 // * the chunk buffer size (+RTS -kb)
596 // * the end of the old stack
597 //
598 for (sp = old_stack->sp;
599 sp < stg_min(old_stack->sp + RtsFlags.GcFlags.stkChunkBufferSize,
600 old_stack->stack + old_stack->stack_size); )
601 {
602 size = stack_frame_sizeW((StgClosure*)sp);
603
604 // if including this frame would exceed the size of the
605 // new stack (taking into account the underflow frame),
606 // then stop at the previous frame.
607 if (sp + size > old_stack->stack + (new_stack->stack_size -
608 sizeofW(StgUnderflowFrame))) {
609 break;
610 }
611 sp += size;
612 }
613
614 if (sp == old_stack->stack + old_stack->stack_size) {
615 //
616 // the old stack chunk is now empty, so we do *not* insert
617 // an underflow frame pointing back to it. There are two
618 // cases: either the old stack chunk was the last one, in
619 // which case it ends with a STOP_FRAME, or it is not the
620 // last one, and it already ends with an UNDERFLOW_FRAME
621 // pointing to the previous chunk. In the latter case, we
622 // will copy the UNDERFLOW_FRAME into the new stack chunk.
623 // In both cases, the old chunk will be subsequently GC'd.
624 //
625 // With the default settings, -ki1k -kb1k, this means the
626 // first stack chunk will be discarded after the first
627 // overflow, being replaced by a non-moving 32k chunk.
628 //
629 } else {
630 new_stack->sp -= sizeofW(StgUnderflowFrame);
631 frame = (StgUnderflowFrame*)new_stack->sp;
632 frame->info = &stg_stack_underflow_frame_info;
633 frame->next_chunk = old_stack;
634 }
635
636 // copy the chunk of old stack between old_stack->sp and sp to the
637 // top of the new stack, just below new_stack->sp
638 chunk_words = sp - old_stack->sp;
639
640 memcpy(/* dest */ new_stack->sp - chunk_words,
641 /* source */ old_stack->sp,
642 /* size */ chunk_words * sizeof(W_));
643
644 old_stack->sp += chunk_words;
645 new_stack->sp -= chunk_words;
646 }
647
648 tso->stackobj = new_stack;
649
650 // we're about to run it, better mark it dirty
651 dirty_STACK(cap, new_stack);
652
653 IF_DEBUG(sanity,checkTSO(tso));
654 // IF_DEBUG(scheduler,printTSO(new_tso));
655 }
656
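/* The chunk-sizing policy above, restated in isolation as a sketch (not
 * compiled here); nextChunkSizeSketch is a hypothetical helper.
 */
#if 0   /* illustrative only */
static W_
nextChunkSizeSketch (StgStack *old_stack)
{
    // less than half of the old chunk was used: the failing check must
    // have asked for a lot of stack, so double up (and keep doubling on
    // each successive overflow); otherwise use the normal -kc chunk size.
    if (old_stack->sp > old_stack->stack + old_stack->stack_size / 2) {
        return stg_max(2 * (old_stack->stack_size + sizeofW(StgStack)),
                       RtsFlags.GcFlags.stkChunkSize);
    } else {
        return RtsFlags.GcFlags.stkChunkSize;
    }
}
#endif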
657
658 /* ---------------------------------------------------------------------------
659 Stack underflow - called from the stg_stack_underflow_frame
660 ------------------------------------------------------------------------ */
661
662 W_ // returns offset to the return address
663 threadStackUnderflow (Capability *cap, StgTSO *tso)
664 {
665 StgStack *new_stack, *old_stack;
666 StgUnderflowFrame *frame;
667 nat retvals;
668
669 debugTraceCap(DEBUG_sched, cap, "stack underflow");
670
671 old_stack = tso->stackobj;
672
673 frame = (StgUnderflowFrame*)(old_stack->stack + old_stack->stack_size
674 - sizeofW(StgUnderflowFrame));
675 ASSERT(frame->info == &stg_stack_underflow_frame_info);
676
677 new_stack = (StgStack*)frame->next_chunk;
678 tso->stackobj = new_stack;
679
680 retvals = (P_)frame - old_stack->sp;
681 if (retvals != 0)
682 {
683 // we have some return values to copy to the old stack
684 if ((W_)(new_stack->sp - new_stack->stack) < retvals)
685 {
686 barf("threadStackUnderflow: not enough space for return values");
687 }
688
689 new_stack->sp -= retvals;
690
691 memcpy(/* dest */ new_stack->sp,
692 /* src */ old_stack->sp,
693 /* size */ retvals * sizeof(W_));
694 }
695
696 // empty the old stack. The GC may still visit this object
697 // because it is on the mutable list.
698 old_stack->sp = old_stack->stack + old_stack->stack_size;
699
700 // restore the stack parameters, and update tot_stack_size
701 tso->tot_stack_size -= old_stack->stack_size;
702
703 // we're about to run it, better mark it dirty
704 dirty_STACK(cap, new_stack);
705
706 return retvals;
707 }
708
709 /* ----------------------------------------------------------------------------
710 * Debugging: why is a thread blocked
711 * ------------------------------------------------------------------------- */
712
713 #if DEBUG
714 void
715 printThreadBlockage(StgTSO *tso)
716 {
717 switch (tso->why_blocked) {
718 #if defined(mingw32_HOST_OS)
719 case BlockedOnDoProc:
720 debugBelch("is blocked on proc (request: %u)", tso->block_info.async_result->reqID);
721 break;
722 #endif
723 #if !defined(THREADED_RTS)
724 case BlockedOnRead:
725 debugBelch("is blocked on read from fd %d", (int)(tso->block_info.fd));
726 break;
727 case BlockedOnWrite:
728 debugBelch("is blocked on write to fd %d", (int)(tso->block_info.fd));
729 break;
730 case BlockedOnDelay:
731 debugBelch("is blocked until %ld", (long)(tso->block_info.target));
732 break;
733 #endif
734 case BlockedOnMVar:
735 debugBelch("is blocked on an MVar @ %p", tso->block_info.closure);
736 break;
737 case BlockedOnBlackHole:
738 debugBelch("is blocked on a black hole %p",
739 ((StgBlockingQueue*)tso->block_info.bh->bh));
740 break;
741 case BlockedOnMsgThrowTo:
742 debugBelch("is blocked on a throwto message");
743 break;
744 case NotBlocked:
745 debugBelch("is not blocked");
746 break;
747 case ThreadMigrating:
748 debugBelch("is runnable, but not on the run queue");
749 break;
750 case BlockedOnCCall:
751 debugBelch("is blocked on an external call");
752 break;
753 case BlockedOnCCall_Interruptible:
754 debugBelch("is blocked on an external call (but may be interrupted)");
755 break;
756 case BlockedOnSTM:
757 debugBelch("is blocked on an STM operation");
758 break;
759 default:
760 barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%p)",
761 tso->why_blocked, tso->id, tso);
762 }
763 }
764
765
766 void
767 printThreadStatus(StgTSO *t)
768 {
769 debugBelch("\tthread %4lu @ %p ", (unsigned long)t->id, (void *)t);
770 {
771 void *label = lookupThreadLabel(t->id);
772 if (label) debugBelch("[\"%s\"] ",(char *)label);
773 }
774 switch (t->what_next) {
775 case ThreadKilled:
776 debugBelch("has been killed");
777 break;
778 case ThreadComplete:
779 debugBelch("has completed");
780 break;
781 default:
782 printThreadBlockage(t);
783 }
784 if (t->dirty) {
785 debugBelch(" (TSO_DIRTY)");
786 }
787 debugBelch("\n");
788 }
789
790 void
791 printAllThreads(void)
792 {
793 StgTSO *t, *next;
794 nat i, g;
795 Capability *cap;
796
797 debugBelch("all threads:\n");
798
799 for (i = 0; i < n_capabilities; i++) {
800 cap = &capabilities[i];
801 debugBelch("threads on capability %d:\n", cap->no);
802 for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
803 printThreadStatus(t);
804 }
805 }
806
807 debugBelch("other threads:\n");
808 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
809 for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
810 if (t->why_blocked != NotBlocked) {
811 printThreadStatus(t);
812 }
813 next = t->global_link;
814 }
815 }
816 }
817
818 // useful from gdb
819 void
820 printThreadQueue(StgTSO *t)
821 {
822 nat i = 0;
823 for (; t != END_TSO_QUEUE; t = t->_link) {
824 printThreadStatus(t);
825 i++;
826 }
827 debugBelch("%d threads on queue\n", i);
828 }
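/* A sketch of calling the printers above from gdb (DEBUG build only);
 * 'cap' here is whatever Capability* is in scope in the debug session.
 */
#if 0   /* illustrative only */
// (gdb) call printAllThreads()
// (gdb) call printThreadQueue(cap->run_queue_hd)
#endif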
829
830 #endif /* DEBUG */