rts/Threads.c
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2006
 *
 * Thread-related functionality
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Capability.h"
#include "Updates.h"
#include "Threads.h"
#include "STM.h"
#include "Schedule.h"
#include "Trace.h"
#include "ThreadLabels.h"
#include "Updates.h"
#include "Messages.h"
#include "RaiseAsync.h"
#include "Prelude.h"
#include "Printer.h"
#include "sm/Sanity.h"
#include "sm/Storage.h"

#include <string.h>

/* Next thread ID to allocate.
 * LOCK: sched_mutex
 */
static StgThreadID next_thread_id = 1;

/* The smallest stack size that makes any sense is:
 *    RESERVED_STACK_WORDS    (so we can get back from the stack overflow)
 *  + sizeofW(StgStopFrame)   (the stg_stop_thread_info frame)
 *  + 1                       (the closure to enter)
 *  + 1                       (stg_ap_v_ret)
 *  + 1                       (spare slot req'd by stg_ap_v_ret)
 *
 * A thread with this stack will bomb immediately with a stack
 * overflow, which will increase its stack size.
 */
#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 3)
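/* (The trailing "+ 3" is the three single-word items itemised above.) */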

/* ---------------------------------------------------------------------------
   Create a new thread.

   The new thread starts with the given stack size.  Before the
   scheduler can run, however, this thread needs to have a closure
   (and possibly some arguments) pushed on its stack.  See
   pushClosure() in Schedule.h.

   createGenThread() and createIOThread() (in SchedAPI.h) are
   convenient packaged versions of this function.

   currently pri (priority) is only used in a GRAN setup -- HWL
   ------------------------------------------------------------------------ */
StgTSO *
createThread(Capability *cap, W_ size)
{
    StgTSO *tso;
    StgStack *stack;
    nat stack_size;

    /* sched_mutex is *not* required */

    /* catch ridiculously small stack sizes */
    if (size < MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO)) {
        size = MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO);
    }

    /* The size argument we are given includes all the per-thread
     * overheads:
     *
     *    - The TSO structure
     *    - The STACK header
     *
     * This is so that we can use a nice round power of 2 for the
     * default stack size (e.g. 1k), and if we're allocating lots of
     * threads back-to-back they'll fit nicely in a block.  It's a bit
     * of a benchmark hack, but it doesn't do any harm.
     */
    stack_size = round_to_mblocks(size - sizeofW(StgTSO));
    stack = (StgStack *)allocate(cap, stack_size);
    TICK_ALLOC_STACK(stack_size);
    SET_HDR(stack, &stg_STACK_info, cap->r.rCCCS);
    stack->stack_size = stack_size - sizeofW(StgStack);
    stack->sp = stack->stack + stack->stack_size;
    stack->dirty = 1;
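    // Note: a brand-new stack lives in the nursery, so it is safe to flag it
    // dirty straight away; dirty_STACK() then has nothing to record when the
    // stack is first written to.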

    tso = (StgTSO *)allocate(cap, sizeofW(StgTSO));
    TICK_ALLOC_TSO();
    SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);

    // Always start with the compiled code evaluator
    tso->what_next = ThreadRunGHC;
    tso->why_blocked = NotBlocked;
    tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
    tso->bq = (StgBlockingQueue *)END_TSO_QUEUE;
    tso->flags = 0;
    tso->dirty = 1;
    tso->_link = END_TSO_QUEUE;

    tso->saved_errno = 0;
    tso->bound = NULL;
    tso->cap = cap;

    tso->stackobj = stack;
    tso->tot_stack_size = stack->stack_size;

    tso->trec = NO_TREC;

#ifdef PROFILING
    tso->prof.cccs = CCS_MAIN;
#endif

    // put a stop frame on the stack
    stack->sp -= sizeofW(StgStopFrame);
    SET_HDR((StgClosure*)stack->sp,
            (StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
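    // stg_stop_thread is what the thread "returns to" once the closure it
    // runs has been evaluated: it hands control back to the scheduler and
    // marks the thread as finished.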

    /* Link the new thread on the global thread list.
     */
    ACQUIRE_LOCK(&sched_mutex);
    tso->id = next_thread_id++;  // while we have the mutex
    tso->global_link = g0->threads;
    g0->threads = tso;
    RELEASE_LOCK(&sched_mutex);

    // ToDo: report the stack size in the event?
    traceEventCreateThread(cap, tso);

    return tso;
}
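
/* A hedged sketch of how this is typically driven (compare createIOThread()
 * and friends, declared in SchedAPI.h; the exact sequence shown here is
 * illustrative only):
 *
 *     tso = createThread(cap, stack_size);
 *     pushClosure(tso, (W_)&stg_ap_v_info);   // apply the closure to ()
 *     pushClosure(tso, (W_)closure);
 *     pushClosure(tso, (W_)&stg_enter_info);  // enter the closure
 *     scheduleThread(cap, tso);
 */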

/* ---------------------------------------------------------------------------
 * Comparing Thread ids.
 *
 * This is used from STG land in the implementation of the
 * instances of Eq/Ord for ThreadIds.
 * ------------------------------------------------------------------------ */

int
cmp_thread(StgPtr tso1, StgPtr tso2)
{
    StgThreadID id1 = ((StgTSO *)tso1)->id;
    StgThreadID id2 = ((StgTSO *)tso2)->id;

    if (id1 < id2) return (-1);
    if (id1 > id2) return 1;
    return 0;
}

/* ---------------------------------------------------------------------------
 * Fetching the ThreadID from an StgTSO.
 *
 * This is used in the implementation of Show for ThreadIds.
 * ------------------------------------------------------------------------ */
int
rts_getThreadId(StgPtr tso)
{
    return ((StgTSO *)tso)->id;
}
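
/* On the Haskell side these are bound via the FFI; roughly (illustrative,
 * see GHC.Conc.Sync in base):
 *
 *     foreign import ccall unsafe "rts_getThreadId"
 *         getThreadId :: ThreadId# -> CInt
 *     foreign import ccall unsafe "cmp_thread"
 *         cmp_thread :: ThreadId# -> ThreadId# -> CInt
 */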

/* -----------------------------------------------------------------------------
   Remove a thread from a queue.
   Fails fatally if the TSO is not on the queue.
   -------------------------------------------------------------------------- */

rtsBool // returns True if we modified the head of the queue
removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
{
    StgTSO *t, *prev;

    prev = NULL;
    for (t = *queue; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                t->_link = END_TSO_QUEUE;
                return rtsFalse;
            } else {
                *queue = t->_link;
                t->_link = END_TSO_QUEUE;
                return rtsTrue;
            }
        }
    }
    barf("removeThreadFromQueue: not found");
}

rtsBool // returns True if we modified head or tail
removeThreadFromDeQueue (Capability *cap,
                         StgTSO **head, StgTSO **tail, StgTSO *tso)
{
    StgTSO *t, *prev;
    rtsBool flag = rtsFalse;

    prev = NULL;
    for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                flag = rtsFalse;
            } else {
                *head = t->_link;
                flag = rtsTrue;
            }
            t->_link = END_TSO_QUEUE;
            if (*tail == tso) {
                if (prev) {
                    *tail = prev;
                } else {
                    *tail = END_TSO_QUEUE;
                }
                return rtsTrue;
            } else {
                return flag;
            }
        }
    }
    barf("removeThreadFromDeQueue: not found");
}

/* ----------------------------------------------------------------------------
   tryWakeupThread()

   Attempt to wake up a thread.  tryWakeupThread is idempotent: it is
   always safe to call it too many times, but it is not safe in
   general to omit a call.

   ------------------------------------------------------------------------- */

void
tryWakeupThread (Capability *cap, StgTSO *tso)
{
    traceEventThreadWakeup (cap, tso, tso->cap->no);

#ifdef THREADED_RTS
    if (tso->cap != cap)
    {
        MessageWakeup *msg;
        msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
        SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
        msg->tso = tso;
        sendMessage(cap, tso->cap, (Message*)msg);
        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
                      (W_)tso->id, tso->cap->no);
        return;
    }
#endif
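    // (When tso is owned by another capability, the wakeup is delegated via
    // the MSG_TRY_WAKEUP message above: only the capability that owns a TSO
    // may unblock it and put it on its run queue.)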

    switch (tso->why_blocked)
    {
    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
        if (tso->_link == END_TSO_QUEUE) {
            tso->block_info.closure = (StgClosure*)END_TSO_QUEUE;
            goto unblock;
        } else {
            return;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;

        i = lockClosure(tso->block_info.closure);
        unlockClosure(tso->block_info.closure, i);
        if (i != &stg_MSG_NULL_info) {
            debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
                          (W_)tso->id, tso->block_info.throwto->header.info);
            return;
        }

        // remove the block frame from the stack
        ASSERT(tso->stackobj->sp[0] == (StgWord)&stg_block_throwto_info);
        tso->stackobj->sp += 3;
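        // (the stg_block_throwto frame occupies 3 words, hence sp += 3)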
        goto unblock;
    }

    case BlockedOnBlackHole:
    case BlockedOnSTM:
    case ThreadMigrating:
        goto unblock;

    default:
        // otherwise, do nothing
        return;
    }

unblock:
    // just run the thread now, if the BH is not really available,
    // we'll block again.
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap,tso);

    // We used to set the context switch flag here, which would
    // trigger a context switch a short time in the future (at the end
    // of the current nursery block).  The idea is that we have just
    // woken up a thread, so we may need to load-balance and migrate
    // threads to other CPUs.  On the other hand, setting the context
    // switch flag here unfairly penalises the current thread by
    // yielding its time slice too early.
    //
    // The synthetic benchmark nofib/smp/chan can be used to show the
    // difference quite clearly.

    // cap->context_switch = 1;
}

/* ----------------------------------------------------------------------------
   migrateThread
   ------------------------------------------------------------------------- */

void
migrateThread (Capability *from, StgTSO *tso, Capability *to)
{
    traceEventMigrateThread (from, tso, to->no);
    // ThreadMigrating tells the target cap that it needs to be added to
    // the run queue when it receives the MSG_TRY_WAKEUP.
    tso->why_blocked = ThreadMigrating;
    tso->cap = to;
    tryWakeupThread(from, tso);
}

/* ----------------------------------------------------------------------------
   wakeBlockingQueue

   wakes up all the threads on the specified queue.
   ------------------------------------------------------------------------- */

void
wakeBlockingQueue(Capability *cap, StgBlockingQueue *bq)
{
    MessageBlackHole *msg;
    const StgInfoTable *i;

    ASSERT(bq->header.info == &stg_BLOCKING_QUEUE_DIRTY_info ||
           bq->header.info == &stg_BLOCKING_QUEUE_CLEAN_info );

    for (msg = bq->queue; msg != (MessageBlackHole*)END_TSO_QUEUE;
         msg = msg->link) {
        i = msg->header.info;
        if (i != &stg_IND_info) {
            ASSERT(i == &stg_MSG_BLACKHOLE_info);
            tryWakeupThread(cap,msg->tso);
        }
    }

    // overwrite the BQ with an indirection so it will be
    // collected at the next GC.
#if defined(DEBUG) && !defined(THREADED_RTS)
    // XXX FILL_SLOP, but not if THREADED_RTS because in that case
    // another thread might be looking at this BLOCKING_QUEUE and
    // checking the owner field at the same time.
    bq->bh = 0; bq->queue = 0; bq->owner = 0;
#endif
    OVERWRITE_INFO(bq, &stg_IND_info);
}

// If we update a closure that we know we BLACKHOLE'd, and the closure
// no longer points to the current TSO as its owner, then there may be
// an orphaned BLOCKING_QUEUE closure with blocked threads attached to
// it.  We therefore traverse the BLOCKING_QUEUEs attached to the
// current TSO to see if any can now be woken up.
void
checkBlockingQueues (Capability *cap, StgTSO *tso)
{
    StgBlockingQueue *bq, *next;
    StgClosure *p;

    debugTraceCap(DEBUG_sched, cap,
                  "collision occurred; checking blocking queues for thread %ld",
                  (W_)tso->id);

    for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {
        next = bq->link;

        if (bq->header.info == &stg_IND_info) {
            // ToDo: could short it out right here, to avoid
            // traversing this IND multiple times.
            continue;
        }

        p = bq->bh;

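        // If the black hole this BQ hangs off no longer points back to the
        // BQ (it has been updated with a value, or claimed by some other
        // owner), the BQ is orphaned: wake everyone on it so they can retry.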
        if (p->header.info != &stg_BLACKHOLE_info ||
            ((StgInd *)p)->indirectee != (StgClosure*)bq)
        {
            wakeBlockingQueue(cap,bq);
        }
    }
}

/* ----------------------------------------------------------------------------
   updateThunk

   Update a thunk with a value.  In order to do this, we need to know
   which TSO owns (or is evaluating) the thunk, in case we need to
   awaken any threads that are blocked on it.
   ------------------------------------------------------------------------- */

void
updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
{
    StgClosure *v;
    StgTSO *owner;
    const StgInfoTable *i;

    i = thunk->header.info;
    if (i != &stg_BLACKHOLE_info &&
        i != &stg_CAF_BLACKHOLE_info &&
        i != &__stg_EAGER_BLACKHOLE_info &&
        i != &stg_WHITEHOLE_info) {
        updateWithIndirection(cap, thunk, val);
        return;
    }

    v = ((StgInd*)thunk)->indirectee;
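    // NB: v (the old indirectee, i.e. the owning TSO or its BLOCKING_QUEUE)
    // must be read before updateWithIndirection() overwrites it with val.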

    updateWithIndirection(cap, thunk, val);

    // sometimes the TSO is locked when we reach here, so its header
    // might be WHITEHOLE.  Hence check for the correct owner using
    // pointer equality first.
    if ((StgTSO*)v == tso) {
        return;
    }

    i = v->header.info;
    if (i == &stg_TSO_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    if (i != &stg_BLOCKING_QUEUE_CLEAN_info &&
        i != &stg_BLOCKING_QUEUE_DIRTY_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    owner = ((StgBlockingQueue*)v)->owner;

    if (owner != tso) {
        checkBlockingQueues(cap, tso);
    } else {
        wakeBlockingQueue(cap, (StgBlockingQueue*)v);
    }
}

/* ---------------------------------------------------------------------------
 * rtsSupportsBoundThreads(): is the RTS built to support bound threads?
 * used by Control.Concurrent for error checking.
 * ------------------------------------------------------------------------- */

HsBool
rtsSupportsBoundThreads(void)
{
#if defined(THREADED_RTS)
    return HS_BOOL_TRUE;
#else
    return HS_BOOL_FALSE;
#endif
}

/* ---------------------------------------------------------------------------
 * isThreadBound(tso): check whether tso is bound to an OS thread.
 * ------------------------------------------------------------------------- */

StgBool
isThreadBound(StgTSO* tso USED_IF_THREADS)
{
#if defined(THREADED_RTS)
    return (tso->bound != NULL);
#endif
    return rtsFalse;
}

/* -----------------------------------------------------------------------------
   Stack overflow

   If the thread has reached its maximum stack size, then raise the
   StackOverflow exception in the offending thread.  Otherwise
   relocate the TSO into a larger chunk of memory and adjust its stack
   size appropriately.
   -------------------------------------------------------------------------- */

void
threadStackOverflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    W_ chunk_size;

    IF_DEBUG(sanity,checkTSO(tso));

    if (tso->tot_stack_size >= RtsFlags.GcFlags.maxStkSize
        && !(tso->flags & TSO_BLOCKEX)) {
        // NB. never raise a StackOverflow exception if the thread is
        // inside Control.Exception.block.  It is impractical to protect
        // against stack overflow exceptions, since virtually anything
        // can raise one (even 'catch'), so this is the only sensible
        // thing to do here.  See bug #767.
        //

        if (tso->flags & TSO_SQUEEZED) {
            return;
        }
        // #3677: In a stack overflow situation, stack squeezing may
        // reduce the stack size, but we don't know whether it has been
        // reduced enough for the stack check to succeed if we try
        // again.  Fortunately stack squeezing is idempotent, so all we
        // need to do is record whether *any* squeezing happened.  If we
        // are at the stack's absolute -K limit, and stack squeezing
        // happened, then we try running the thread again.  The
        // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
        // squeezing happened or not.

        debugTrace(DEBUG_gc,
                   "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
                   (long)tso->id, tso, (long)tso->stackobj->stack_size,
                   RtsFlags.GcFlags.maxStkSize);
        IF_DEBUG(gc,
                 /* If we're debugging, just print out the top of the stack */
                 printStackChunk(tso->stackobj->sp,
                                 stg_min(tso->stackobj->stack + tso->stackobj->stack_size,
                                         tso->stackobj->sp+64)));

        // Send this thread the StackOverflow exception
        throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
    }


    // We also want to avoid enlarging the stack if squeezing has
    // already released some of it.  However, we don't want to get into
    // a pathological situation where a thread has a nearly full stack
    // (near its current limit, but not near the absolute -K limit),
    // keeps allocating a little bit, squeezing removes a little bit,
    // and then it runs again.  So to avoid this, if we squeezed *and*
    // there is still less than BLOCK_SIZE_W words free, then we enlarge
    // the stack anyway.
    if ((tso->flags & TSO_SQUEEZED) &&
        ((W_)(tso->stackobj->sp - tso->stackobj->stack) >= BLOCK_SIZE_W)) {
        return;
    }

    old_stack = tso->stackobj;

    // If we used less than half of the previous stack chunk, then we
    // must have failed a stack check for a large amount of stack.  In
    // this case we allocate a double-sized chunk to try to
    // accommodate the large stack request.  If that also fails, the
    // next chunk will be 4x normal size, and so on.
    //
    // It would be better to have the mutator tell us how much stack
    // was needed, as we do with heap allocations, but this works for
    // now.
    //
    if (old_stack->sp > old_stack->stack + old_stack->stack_size / 2)
    {
        chunk_size = stg_max(2 * (old_stack->stack_size + sizeofW(StgStack)),
                             RtsFlags.GcFlags.stkChunkSize);
    }
    else
    {
        chunk_size = RtsFlags.GcFlags.stkChunkSize;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "allocating new stack chunk of size %" FMT_Word " bytes",
                  (W_)(chunk_size * sizeof(W_)));

    new_stack = (StgStack*) allocate(cap, chunk_size);
    SET_HDR(new_stack, &stg_STACK_info, old_stack->header.prof.ccs);
    TICK_ALLOC_STACK(chunk_size);

    new_stack->dirty = 0; // begin clean, we'll mark it dirty below
    new_stack->stack_size = chunk_size - sizeofW(StgStack);
    new_stack->sp = new_stack->stack + new_stack->stack_size;
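    // (stacks grow downwards, so sp starts just past the top word of the chunk)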

    tso->tot_stack_size += new_stack->stack_size;

    {
        StgWord *sp;
        W_ chunk_words, size;

        // find the boundary of the chunk of old stack we're going to
        // copy to the new stack.  We skip over stack frames until we
        // reach the smaller of
        //
        //    * the chunk buffer size (+RTS -kb)
        //    * the end of the old stack
        //
        for (sp = old_stack->sp;
             sp < stg_min(old_stack->sp + RtsFlags.GcFlags.stkChunkBufferSize,
                          old_stack->stack + old_stack->stack_size); )
        {
            size = stack_frame_sizeW((StgClosure*)sp);

            // if including this frame would exceed the size of the
            // new stack (taking into account the underflow frame),
            // then stop at the previous frame.
            if (sp + size > old_stack->stack + (new_stack->stack_size -
                                                sizeofW(StgUnderflowFrame))) {
                break;
            }
            sp += size;
        }

        if (sp == old_stack->stack + old_stack->stack_size) {
            //
            // the old stack chunk is now empty, so we do *not* insert
            // an underflow frame pointing back to it.  There are two
            // cases: either the old stack chunk was the last one, in
            // which case it ends with a STOP_FRAME, or it is not the
            // last one, and it already ends with an UNDERFLOW_FRAME
            // pointing to the previous chunk.  In the latter case, we
            // will copy the UNDERFLOW_FRAME into the new stack chunk.
            // In both cases, the old chunk will be subsequently GC'd.
            //
            // With the default settings, -ki1k -kb1k, this means the
            // first stack chunk will be discarded after the first
            // overflow, being replaced by a non-moving 32k chunk.
            //
        } else {
            new_stack->sp -= sizeofW(StgUnderflowFrame);
            frame = (StgUnderflowFrame*)new_stack->sp;
            frame->info = &stg_stack_underflow_frame_info;
            frame->next_chunk = old_stack;
        }

        // copy the stack chunk between tso->sp and sp to
        //   new_tso->sp + (tso->sp - sp)
        chunk_words = sp - old_stack->sp;

        memcpy(/* dest   */ new_stack->sp - chunk_words,
               /* source */ old_stack->sp,
               /* size   */ chunk_words * sizeof(W_));

        old_stack->sp += chunk_words;
        new_stack->sp -= chunk_words;
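        // the frames just copied now live in the new chunk: old_stack->sp is
        // advanced past them so they are not run twice, and new_stack->sp is
        // moved down to expose them at the top of the new stack.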
    }

    tso->stackobj = new_stack;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    IF_DEBUG(sanity,checkTSO(tso));
    // IF_DEBUG(scheduler,printTSO(new_tso));
}


/* ---------------------------------------------------------------------------
   Stack underflow - called from the stg_stack_underflow_frame
   ------------------------------------------------------------------------ */

W_ // returns offset to the return address
threadStackUnderflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    nat retvals;

    debugTraceCap(DEBUG_sched, cap, "stack underflow");

    old_stack = tso->stackobj;

    frame = (StgUnderflowFrame*)(old_stack->stack + old_stack->stack_size
                                 - sizeofW(StgUnderflowFrame));
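    // (the underflow frame always sits at the very top, i.e. highest
    // addresses, of a stack chunk, which is where the computation above
    // finds it)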
    ASSERT(frame->info == &stg_stack_underflow_frame_info);

    new_stack = (StgStack*)frame->next_chunk;
    tso->stackobj = new_stack;

    retvals = (P_)frame - old_stack->sp;
    if (retvals != 0)
    {
        // we have some return values to copy to the old stack
        if ((W_)(new_stack->sp - new_stack->stack) < retvals)
        {
            barf("threadStackUnderflow: not enough space for return values");
        }

        new_stack->sp -= retvals;

        memcpy(/* dest */ new_stack->sp,
               /* src  */ old_stack->sp,
               /* size */ retvals * sizeof(W_));
    }

    // empty the old stack.  The GC may still visit this object
    // because it is on the mutable list.
    old_stack->sp = old_stack->stack + old_stack->stack_size;

    // restore the stack parameters, and update tot_stack_size
    tso->tot_stack_size -= old_stack->stack_size;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    return retvals;
}

/* ----------------------------------------------------------------------------
 * Debugging: why is a thread blocked
 * ------------------------------------------------------------------------- */

#if DEBUG
void
printThreadBlockage(StgTSO *tso)
{
    switch (tso->why_blocked) {
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
        debugBelch("is blocked on proc (request: %u)", tso->block_info.async_result->reqID);
        break;
#endif
#if !defined(THREADED_RTS)
    case BlockedOnRead:
        debugBelch("is blocked on read from fd %d", (int)(tso->block_info.fd));
        break;
    case BlockedOnWrite:
        debugBelch("is blocked on write to fd %d", (int)(tso->block_info.fd));
        break;
    case BlockedOnDelay:
        debugBelch("is blocked until %ld", (long)(tso->block_info.target));
        break;
#endif
    case BlockedOnMVar:
        debugBelch("is blocked on an MVar @ %p", tso->block_info.closure);
        break;
    case BlockedOnMVarRead:
        debugBelch("is blocked on atomic MVar read @ %p", tso->block_info.closure);
        break;
    case BlockedOnBlackHole:
        debugBelch("is blocked on a black hole %p",
                   ((StgBlockingQueue*)tso->block_info.bh->bh));
        break;
    case BlockedOnMsgThrowTo:
        debugBelch("is blocked on a throwto message");
        break;
    case NotBlocked:
        debugBelch("is not blocked");
        break;
    case ThreadMigrating:
        debugBelch("is runnable, but not on the run queue");
        break;
    case BlockedOnCCall:
        debugBelch("is blocked on an external call");
        break;
    case BlockedOnCCall_Interruptible:
        debugBelch("is blocked on an external call (but may be interrupted)");
        break;
    case BlockedOnSTM:
        debugBelch("is blocked on an STM operation");
        break;
    default:
        barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %lu (%p)",
             tso->why_blocked, (unsigned long)tso->id, (void *)tso);
    }
}


void
printThreadStatus(StgTSO *t)
{
    debugBelch("\tthread %4lu @ %p ", (unsigned long)t->id, (void *)t);
    {
        void *label = lookupThreadLabel(t->id);
        if (label) debugBelch("[\"%s\"] ",(char *)label);
    }
    switch (t->what_next) {
    case ThreadKilled:
        debugBelch("has been killed");
        break;
    case ThreadComplete:
        debugBelch("has completed");
        break;
    default:
        printThreadBlockage(t);
    }
    if (t->dirty) {
        debugBelch(" (TSO_DIRTY)");
    }
    debugBelch("\n");
}

void
printAllThreads(void)
{
    StgTSO *t, *next;
    nat i, g;
    Capability *cap;

    debugBelch("all threads:\n");

    for (i = 0; i < n_capabilities; i++) {
        cap = capabilities[i];
        debugBelch("threads on capability %d:\n", cap->no);
        for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
            printThreadStatus(t);
        }
    }

    debugBelch("other threads:\n");
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
            if (t->why_blocked != NotBlocked) {
                printThreadStatus(t);
            }
            next = t->global_link;
        }
    }
}

// useful from gdb
void
printThreadQueue(StgTSO *t)
{
    nat i = 0;
    for (; t != END_TSO_QUEUE; t = t->_link) {
        printThreadStatus(t);
        i++;
    }
    debugBelch("%d threads on queue\n", i);
}

#endif /* DEBUG */