/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2006
 *
 * Thread-related functionality
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Capability.h"
#include "Updates.h"
#include "Threads.h"
#include "STM.h"
#include "Schedule.h"
#include "Trace.h"
#include "ThreadLabels.h"
#include "Messages.h"
#include "RaiseAsync.h"
#include "Prelude.h"
#include "Printer.h"
#include "sm/Sanity.h"
#include "sm/Storage.h"

#include <string.h>

/* Next thread ID to allocate.
 * LOCK: sched_mutex
 */
static StgThreadID next_thread_id = 1;

/* The smallest stack size that makes any sense is:
 *    RESERVED_STACK_WORDS    (so we can get back from the stack overflow)
 *  + sizeofW(StgStopFrame)   (the stg_stop_thread_info frame)
 *  + 1                       (the closure to enter)
 *  + 1                       (stg_ap_v_ret)
 *  + 1                       (spare slot req'd by stg_ap_v_ret)
 *
 * A thread with this stack will bomb immediately with a stack
 * overflow, which will increase its stack size.
 */
#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 3)

/* ---------------------------------------------------------------------------
   Create a new thread.

   The new thread starts with the given stack size.  Before the
   scheduler can run it, however, this thread needs to have a closure
   (and possibly some arguments) pushed on its stack.  See
   pushClosure() in Schedule.h.

   createGenThread() and createIOThread() (in SchedAPI.h) are
   convenient packaged versions of this function.

   currently pri (priority) is only used in a GRAN setup -- HWL
   ------------------------------------------------------------------------ */
StgTSO *
createThread(Capability *cap, W_ size)
{
    StgTSO *tso;
    StgStack *stack;
    nat stack_size;

    /* sched_mutex is *not* required */

    /* catch ridiculously small stack sizes */
    if (size < MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO)) {
        size = MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO);
    }

    /* The size argument we are given includes all the per-thread
     * overheads:
     *
     *    - The TSO structure
     *    - The STACK header
     *
     * This is so that we can use a nice round power of 2 for the
     * default stack size (e.g. 1k), and if we're allocating lots of
     * threads back-to-back they'll fit nicely in a block.  It's a bit
     * of a benchmark hack, but it doesn't do any harm.
     */
    stack_size = round_to_mblocks(size - sizeofW(StgTSO));
    stack = (StgStack *)allocate(cap, stack_size);
    TICK_ALLOC_STACK(stack_size);
    SET_HDR(stack, &stg_STACK_info, cap->r.rCCCS);
    stack->stack_size = stack_size - sizeofW(StgStack);
    stack->sp         = stack->stack + stack->stack_size;
    stack->dirty      = 1;

    tso = (StgTSO *)allocate(cap, sizeofW(StgTSO));
    TICK_ALLOC_TSO();
    SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);

    // Always start with the compiled code evaluator
    tso->what_next = ThreadRunGHC;
    tso->why_blocked = NotBlocked;
    tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
    tso->bq = (StgBlockingQueue *)END_TSO_QUEUE;
    tso->flags = 0;
    tso->dirty = 1;
    tso->_link = END_TSO_QUEUE;

    tso->saved_errno = 0;
    tso->bound = NULL;
    tso->cap = cap;

    tso->stackobj       = stack;
    tso->tot_stack_size = stack->stack_size;

    tso->trec = NO_TREC;

#ifdef PROFILING
    tso->prof.cccs = CCS_MAIN;
#endif

    // put a stop frame on the stack
    stack->sp -= sizeofW(StgStopFrame);
    SET_HDR((StgClosure*)stack->sp,
            (StgInfoTable *)&stg_stop_thread_info, CCS_SYSTEM);

    /* Link the new thread on the global thread list.
     */
    ACQUIRE_LOCK(&sched_mutex);
    tso->id = next_thread_id++;  // while we have the mutex
    tso->global_link = g0->threads;
    g0->threads = tso;
    RELEASE_LOCK(&sched_mutex);

    // ToDo: report the stack size in the event?
    traceEventCreateThread(cap, tso);

    return tso;
}
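
/* Example (illustrative sketch, not part of the RTS): how a caller
 * typically builds a runnable thread on top of createThread().  This is
 * roughly the pattern that the createGenThread()/createIOThread()
 * wrappers mentioned above follow; `action_closure` is a placeholder
 * for whatever closure the thread should run, and scheduleWaitThread()
 * (Schedule.h) is one way to hand the result to the scheduler.
 *
 *     StgTSO *tso = createThread(cap, RtsFlags.GcFlags.initialStkSize);
 *     pushClosure(tso, (W_)&stg_ap_v_info);   // apply the closure to a void arg
 *     pushClosure(tso, (W_)action_closure);   // the closure to enter
 *     pushClosure(tso, (W_)&stg_enter_info);  // enter it when the thread runs
 *     scheduleWaitThread(tso, NULL, &cap);    // run it and wait for completion
 */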

/* ---------------------------------------------------------------------------
 * Comparing Thread ids.
 *
 * This is used from STG land in the implementation of the
 * instances of Eq/Ord for ThreadIds.
 * ------------------------------------------------------------------------ */

int
cmp_thread(StgPtr tso1, StgPtr tso2)
{
    StgThreadID id1 = ((StgTSO *)tso1)->id;
    StgThreadID id2 = ((StgTSO *)tso2)->id;

    if (id1 < id2) return (-1);
    if (id1 > id2) return 1;
    return 0;
}

/* ---------------------------------------------------------------------------
 * Fetching the ThreadID from an StgTSO.
 *
 * This is used in the implementation of Show for ThreadIds.
 * ------------------------------------------------------------------------ */
int
rts_getThreadId(StgPtr tso)
{
    return ((StgTSO *)tso)->id;
}

/* -----------------------------------------------------------------------------
   Remove a thread from a queue.
   Fails fatally if the TSO is not on the queue.
   -------------------------------------------------------------------------- */

rtsBool // returns True if we modified the head of the queue
removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
{
    StgTSO *t, *prev;

    prev = NULL;
    for (t = *queue; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap, prev, t->_link);
                t->_link = END_TSO_QUEUE;
                return rtsFalse;
            } else {
                *queue = t->_link;
                t->_link = END_TSO_QUEUE;
                return rtsTrue;
            }
        }
    }
    barf("removeThreadFromQueue: not found");
}

rtsBool // returns True if we modified head or tail
removeThreadFromDeQueue (Capability *cap,
                         StgTSO **head, StgTSO **tail, StgTSO *tso)
{
    StgTSO *t, *prev;
    rtsBool flag = rtsFalse;

    prev = NULL;
    for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap, prev, t->_link);
                flag = rtsFalse;
            } else {
                *head = t->_link;
                flag = rtsTrue;
            }
            t->_link = END_TSO_QUEUE;
            if (*tail == tso) {
                if (prev) {
                    *tail = prev;
                } else {
                    *tail = END_TSO_QUEUE;
                }
                return rtsTrue;
            } else {
                return flag;
            }
        }
    }
    barf("removeThreadFromDeQueue: not found");
}
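
/* Worked example (illustrative): given a singly linked queue
 *
 *     A -> B -> C -> END_TSO_QUEUE,   reached via   StgTSO *q = A;
 *
 *     removeThreadFromQueue(cap, &q, B);  // unlinks B: A -> C; head unchanged, returns rtsFalse
 *     removeThreadFromQueue(cap, &q, A);  // unlinks A: q now points at C; returns rtsTrue
 *
 * removeThreadFromDeQueue() does the same for a queue that also keeps a
 * tail pointer (e.g. a head/tail pair such as a capability's
 * run_queue_hd/run_queue_tl), additionally resetting *tail when the
 * removed TSO was the last element.
 */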

/* ----------------------------------------------------------------------------
   tryWakeupThread()

   Attempt to wake up a thread.  tryWakeupThread is idempotent: it is
   always safe to call it too many times, but it is not safe in
   general to omit a call.

   ------------------------------------------------------------------------- */

void
tryWakeupThread (Capability *cap, StgTSO *tso)
{
    traceEventThreadWakeup (cap, tso, tso->cap->no);

#ifdef THREADED_RTS
    if (tso->cap != cap)
    {
        MessageWakeup *msg;
        msg = (MessageWakeup *)allocate(cap, sizeofW(MessageWakeup));
        SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
        msg->tso = tso;
        sendMessage(cap, tso->cap, (Message*)msg);
        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
                      (W_)tso->id, tso->cap->no);
        return;
    }
#endif

    switch (tso->why_blocked)
    {
    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
        if (tso->_link == END_TSO_QUEUE) {
            tso->block_info.closure = (StgClosure*)END_TSO_QUEUE;
            goto unblock;
        } else {
            return;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;

        i = lockClosure(tso->block_info.closure);
        unlockClosure(tso->block_info.closure, i);
        if (i != &stg_MSG_NULL_info) {
            debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
                          (W_)tso->id, tso->block_info.throwto->header.info);
            return;
        }

        // remove the block frame from the stack
        ASSERT(tso->stackobj->sp[0] == (StgWord)&stg_block_throwto_info);
        tso->stackobj->sp += 3;
        goto unblock;
    }

    case BlockedOnBlackHole:
    case BlockedOnSTM:
    case ThreadMigrating:
        goto unblock;

    default:
        // otherwise, do nothing
        return;
    }

unblock:
    // just run the thread now; if the BH is not really available,
    // we'll block again.
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap, tso);

    // We used to set the context switch flag here, which would
    // trigger a context switch a short time in the future (at the end
    // of the current nursery block).  The idea is that we have just
    // woken up a thread, so we may need to load-balance and migrate
    // threads to other CPUs.  On the other hand, setting the context
    // switch flag here unfairly penalises the current thread by
    // yielding its time slice too early.
    //
    // The synthetic benchmark nofib/smp/chan can be used to show the
    // difference quite clearly.

    // cap->context_switch = 1;
}
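
/* Example (illustrative): a caller that has just unlinked a blocked TSO
 * from some queue simply calls
 *
 *     tryWakeupThread(cap, tso);
 *
 * without caring which capability owns tso.  If tso->cap is a different
 * capability, the THREADED_RTS branch above forwards the wakeup as a
 * MSG_TRY_WAKEUP message, and the owning capability re-runs this
 * function when it executes that message (see Messages.c).
 */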

/* ----------------------------------------------------------------------------
   migrateThread
   ------------------------------------------------------------------------- */

void
migrateThread (Capability *from, StgTSO *tso, Capability *to)
{
    traceEventMigrateThread (from, tso, to->no);
    // ThreadMigrating tells the target cap that it needs to be added to
    // the run queue when it receives the MSG_TRY_WAKEUP.
    tso->why_blocked = ThreadMigrating;
    tso->cap = to;
    tryWakeupThread(from, tso);
}

/* ----------------------------------------------------------------------------
   wakeBlockingQueue

   Wakes up all the threads on the specified blocking queue.
   ------------------------------------------------------------------------- */

void
wakeBlockingQueue(Capability *cap, StgBlockingQueue *bq)
{
    MessageBlackHole *msg;
    const StgInfoTable *i;

    ASSERT(bq->header.info == &stg_BLOCKING_QUEUE_DIRTY_info ||
           bq->header.info == &stg_BLOCKING_QUEUE_CLEAN_info);

    for (msg = bq->queue; msg != (MessageBlackHole*)END_TSO_QUEUE;
         msg = msg->link) {
        i = msg->header.info;
        if (i != &stg_IND_info) {
            ASSERT(i == &stg_MSG_BLACKHOLE_info);
            tryWakeupThread(cap, msg->tso);
        }
    }

    // overwrite the BQ with an indirection so it will be
    // collected at the next GC.
#if defined(DEBUG) && !defined(THREADED_RTS)
    // XXX FILL_SLOP, but not if THREADED_RTS because in that case
    // another thread might be looking at this BLOCKING_QUEUE and
    // checking the owner field at the same time.
    bq->bh = 0; bq->queue = 0; bq->owner = 0;
#endif
    OVERWRITE_INFO(bq, &stg_IND_info);
}

// If we update a closure that we know we BLACKHOLE'd, and the closure
// no longer points to the current TSO as its owner, then there may be
// an orphaned BLOCKING_QUEUE closure with blocked threads attached to
// it.  We therefore traverse the BLOCKING_QUEUEs attached to the
// current TSO to see if any can now be woken up.
void
checkBlockingQueues (Capability *cap, StgTSO *tso)
{
    StgBlockingQueue *bq, *next;
    StgClosure *p;

    debugTraceCap(DEBUG_sched, cap,
                  "collision occurred; checking blocking queues for thread %ld",
                  (W_)tso->id);

    for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {
        next = bq->link;

        if (bq->header.info == &stg_IND_info) {
            // ToDo: could short it out right here, to avoid
            // traversing this IND multiple times.
            continue;
        }

        p = bq->bh;

        if (p->header.info != &stg_BLACKHOLE_info ||
            ((StgInd *)p)->indirectee != (StgClosure*)bq)
        {
            wakeBlockingQueue(cap, bq);
        }
    }
}

/* ----------------------------------------------------------------------------
   updateThunk

   Update a thunk with a value.  In order to do this, we need to know
   which TSO owns (or is evaluating) the thunk, in case we need to
   awaken any threads that are blocked on it.
   ------------------------------------------------------------------------- */

void
updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
{
    StgClosure *v;
    StgTSO *owner;
    const StgInfoTable *i;

    i = thunk->header.info;
    if (i != &stg_BLACKHOLE_info &&
        i != &stg_CAF_BLACKHOLE_info &&
        i != &__stg_EAGER_BLACKHOLE_info &&
        i != &stg_WHITEHOLE_info) {
        updateWithIndirection(cap, thunk, val);
        return;
    }

    v = ((StgInd*)thunk)->indirectee;

    updateWithIndirection(cap, thunk, val);

    // sometimes the TSO is locked when we reach here, so its header
    // might be WHITEHOLE.  Hence check for the correct owner using
    // pointer equality first.
    if ((StgTSO*)v == tso) {
        return;
    }

    i = v->header.info;
    if (i == &stg_TSO_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    if (i != &stg_BLOCKING_QUEUE_CLEAN_info &&
        i != &stg_BLOCKING_QUEUE_DIRTY_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    owner = ((StgBlockingQueue*)v)->owner;

    if (owner != tso) {
        checkBlockingQueues(cap, tso);
    } else {
        wakeBlockingQueue(cap, (StgBlockingQueue*)v);
    }
}

/* ---------------------------------------------------------------------------
 * rtsSupportsBoundThreads(): is the RTS built to support bound threads?
 * Used by Control.Concurrent for error checking.
 * ------------------------------------------------------------------------- */

HsBool
rtsSupportsBoundThreads(void)
{
#if defined(THREADED_RTS)
    return HS_BOOL_TRUE;
#else
    return HS_BOOL_FALSE;
#endif
}

/* ---------------------------------------------------------------------------
 * isThreadBound(tso): check whether tso is bound to an OS thread.
 * ------------------------------------------------------------------------- */

StgBool
isThreadBound(StgTSO* tso USED_IF_THREADS)
{
#if defined(THREADED_RTS)
    return (tso->bound != NULL);
#endif
    return rtsFalse;
}

/* -----------------------------------------------------------------------------
   Stack overflow

   If the thread has reached its maximum stack size, then raise the
   StackOverflow exception in the offending thread.  Otherwise
   relocate the TSO into a larger chunk of memory and adjust its stack
   size appropriately.
   -------------------------------------------------------------------------- */

void
threadStackOverflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    W_ chunk_size;

    IF_DEBUG(sanity, checkTSO(tso));

    if (tso->tot_stack_size >= RtsFlags.GcFlags.maxStkSize
        && !(tso->flags & TSO_BLOCKEX)) {
        // NB. never raise a StackOverflow exception if the thread is
        // inside Control.Exception.mask.  It is impractical to protect
        // against stack overflow exceptions, since virtually anything
        // can raise one (even 'catch'), so this is the only sensible
        // thing to do here.  See bug #767.
        //

        if (tso->flags & TSO_SQUEEZED) {
            return;
        }
        // #3677: In a stack overflow situation, stack squeezing may
        // reduce the stack size, but we don't know whether it has been
        // reduced enough for the stack check to succeed if we try
        // again.  Fortunately stack squeezing is idempotent, so all we
        // need to do is record whether *any* squeezing happened.  If we
        // are at the stack's absolute -K limit, and stack squeezing
        // happened, then we try running the thread again.  The
        // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
        // squeezing happened or not.

        debugTrace(DEBUG_gc,
                   "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
                   (long)tso->id, tso, (long)tso->stackobj->stack_size,
                   RtsFlags.GcFlags.maxStkSize);
        IF_DEBUG(gc,
                 /* If we're debugging, just print out the top of the stack */
                 printStackChunk(tso->stackobj->sp,
                                 stg_min(tso->stackobj->stack + tso->stackobj->stack_size,
                                         tso->stackobj->sp+64)));

        // Send this thread the StackOverflow exception
        throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
    }

    // We also want to avoid enlarging the stack if squeezing has
    // already released some of it.  However, we don't want to get into
    // a pathological situation where a thread has a nearly full stack
    // (near its current limit, but not near the absolute -K limit),
    // keeps allocating a little bit, squeezing removes a little bit,
    // and then it runs again.  So to avoid this, if we squeezed *and*
    // there is still less than BLOCK_SIZE_W words free, then we enlarge
    // the stack anyway.
    //
    // NB: This reasoning only applies if the stack has been squeezed;
    // if no squeezing has occurred, then BLOCK_SIZE_W free space does
    // not mean there is enough stack to run; the thread may have
    // requested a large amount of stack (see below).  If the amount
    // we squeezed is not enough to run the thread, we'll come back
    // here (no squeezing will have occurred and thus we'll enlarge the
    // stack.)
    if ((tso->flags & TSO_SQUEEZED) &&
        ((W_)(tso->stackobj->sp - tso->stackobj->stack) >= BLOCK_SIZE_W)) {
        return;
    }

    old_stack = tso->stackobj;

    // If we used less than half of the previous stack chunk, then we
    // must have failed a stack check for a large amount of stack.  In
    // this case we allocate a double-sized chunk to try to
    // accommodate the large stack request.  If that also fails, the
    // next chunk will be 4x normal size, and so on.
    //
    // It would be better to have the mutator tell us how much stack
    // was needed, as we do with heap allocations, but this works for
    // now.
    //
    if (old_stack->sp > old_stack->stack + old_stack->stack_size / 2)
    {
        chunk_size = stg_max(2 * (old_stack->stack_size + sizeofW(StgStack)),
                             RtsFlags.GcFlags.stkChunkSize);
    }
    else
    {
        chunk_size = RtsFlags.GcFlags.stkChunkSize;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "allocating new stack chunk of size %ld bytes",
                  (long)(chunk_size * sizeof(W_)));

    new_stack = (StgStack*) allocate(cap, chunk_size);
    SET_HDR(new_stack, &stg_STACK_info, old_stack->header.prof.ccs);
    TICK_ALLOC_STACK(chunk_size);

    new_stack->dirty = 0; // begin clean, we'll mark it dirty below
    new_stack->stack_size = chunk_size - sizeofW(StgStack);
    new_stack->sp = new_stack->stack + new_stack->stack_size;

    tso->tot_stack_size += new_stack->stack_size;

    {
        StgWord *sp;
        W_ chunk_words, size;

        // find the boundary of the chunk of old stack we're going to
        // copy to the new stack.  We skip over stack frames until we
        // reach the smaller of
        //
        //   * the chunk buffer size (+RTS -kb)
        //   * the end of the old stack
        //
        for (sp = old_stack->sp;
             sp < stg_min(old_stack->sp + RtsFlags.GcFlags.stkChunkBufferSize,
                          old_stack->stack + old_stack->stack_size); )
        {
            size = stack_frame_sizeW((StgClosure*)sp);

            // if including this frame would exceed the size of the
            // new stack (taking into account the underflow frame),
            // then stop at the previous frame.
            if (sp + size > old_stack->stack + (new_stack->stack_size -
                                                sizeofW(StgUnderflowFrame))) {
                break;
            }
            sp += size;
        }

        if (sp == old_stack->stack + old_stack->stack_size) {
            //
            // the old stack chunk is now empty, so we do *not* insert
            // an underflow frame pointing back to it.  There are two
            // cases: either the old stack chunk was the last one, in
            // which case it ends with a STOP_FRAME, or it is not the
            // last one, and it already ends with an UNDERFLOW_FRAME
            // pointing to the previous chunk.  In the latter case, we
            // will copy the UNDERFLOW_FRAME into the new stack chunk.
            // In both cases, the old chunk will be subsequently GC'd.
            //
            // With the default settings, -ki1k -kb1k, this means the
            // first stack chunk will be discarded after the first
            // overflow, being replaced by a non-moving 32k chunk.
            //
        } else {
            new_stack->sp -= sizeofW(StgUnderflowFrame);
            frame = (StgUnderflowFrame*)new_stack->sp;
            frame->info = &stg_stack_underflow_frame_info;
            frame->next_chunk = old_stack;
        }

        // copy the stack chunk between tso->sp and sp to
        //   new_tso->sp + (tso->sp - sp)
        chunk_words = sp - old_stack->sp;

        memcpy(/* dest   */ new_stack->sp - chunk_words,
               /* source */ old_stack->sp,
               /* size   */ chunk_words * sizeof(W_));

        old_stack->sp += chunk_words;
        new_stack->sp -= chunk_words;
    }

    tso->stackobj = new_stack;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    IF_DEBUG(sanity, checkTSO(tso));
    // IF_DEBUG(scheduler,printTSO(new_tso));
}
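
/* Worked example (illustrative; assumes the default -kc32k chunk size
 * and an 8-byte word, i.e. stkChunkSize = 4096 words): a thread that
 * overflows while using more than half of its current chunk simply gets
 * a fresh 4096-word chunk.  A thread that fails a stack check for a
 * large amount (so less than half of the current chunk is in use)
 * instead gets max(2 * 4096, 4096) = 8192 words; if that is still not
 * enough, the next attempt doubles again to 16384 words, and so on,
 * until the requested frame fits.
 */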

/* ---------------------------------------------------------------------------
   Stack underflow - called from the stg_stack_underflow_frame
   ------------------------------------------------------------------------ */

W_ // returns offset to the return address
threadStackUnderflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    nat retvals;

    debugTraceCap(DEBUG_sched, cap, "stack underflow");

    old_stack = tso->stackobj;

    frame = (StgUnderflowFrame*)(old_stack->stack + old_stack->stack_size
                                 - sizeofW(StgUnderflowFrame));
    ASSERT(frame->info == &stg_stack_underflow_frame_info);

    new_stack = (StgStack*)frame->next_chunk;
    tso->stackobj = new_stack;

    retvals = (P_)frame - old_stack->sp;
    if (retvals != 0)
    {
        // we have some return values to copy to the old stack
        if ((W_)(new_stack->sp - new_stack->stack) < retvals)
        {
            barf("threadStackUnderflow: not enough space for return values");
        }

        new_stack->sp -= retvals;

        memcpy(/* dest */ new_stack->sp,
               /* src  */ old_stack->sp,
               /* size */ retvals * sizeof(W_));
    }

    // empty the old stack.  The GC may still visit this object
    // because it is on the mutable list.
    old_stack->sp = old_stack->stack + old_stack->stack_size;

    // restore the stack parameters, and update tot_stack_size
    tso->tot_stack_size -= old_stack->stack_size;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    return retvals;
}

/* ----------------------------------------------------------------------------
 * Debugging: why is a thread blocked
 * ------------------------------------------------------------------------- */

#if DEBUG
void
printThreadBlockage(StgTSO *tso)
{
    switch (tso->why_blocked) {
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
        debugBelch("is blocked on proc (request: %u)", tso->block_info.async_result->reqID);
        break;
#endif
#if !defined(THREADED_RTS)
    case BlockedOnRead:
        debugBelch("is blocked on read from fd %d", (int)(tso->block_info.fd));
        break;
    case BlockedOnWrite:
        debugBelch("is blocked on write to fd %d", (int)(tso->block_info.fd));
        break;
    case BlockedOnDelay:
        debugBelch("is blocked until %ld", (long)(tso->block_info.target));
        break;
#endif
    case BlockedOnMVar:
        debugBelch("is blocked on an MVar @ %p", tso->block_info.closure);
        break;
    case BlockedOnMVarRead:
        debugBelch("is blocked on atomic MVar read @ %p", tso->block_info.closure);
        break;
    case BlockedOnBlackHole:
        debugBelch("is blocked on a black hole %p",
                   ((StgBlockingQueue*)tso->block_info.bh->bh));
        break;
    case BlockedOnMsgThrowTo:
        debugBelch("is blocked on a throwto message");
        break;
    case NotBlocked:
        debugBelch("is not blocked");
        break;
    case ThreadMigrating:
        debugBelch("is runnable, but not on the run queue");
        break;
    case BlockedOnCCall:
        debugBelch("is blocked on an external call");
        break;
    case BlockedOnCCall_Interruptible:
        debugBelch("is blocked on an external call (but may be interrupted)");
        break;
    case BlockedOnSTM:
        debugBelch("is blocked on an STM operation");
        break;
    default:
        barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%p)",
             tso->why_blocked, tso->id, tso);
    }
}

void
printThreadStatus(StgTSO *t)
{
    debugBelch("\tthread %4lu @ %p ", (unsigned long)t->id, (void *)t);
    {
        void *label = lookupThreadLabel(t->id);
        if (label) debugBelch("[\"%s\"] ", (char *)label);
    }
    switch (t->what_next) {
    case ThreadKilled:
        debugBelch("has been killed");
        break;
    case ThreadComplete:
        debugBelch("has completed");
        break;
    default:
        printThreadBlockage(t);
    }
    if (t->dirty) {
        debugBelch(" (TSO_DIRTY)");
    }
    debugBelch("\n");
}

void
printAllThreads(void)
{
    StgTSO *t, *next;
    nat i, g;
    Capability *cap;

    debugBelch("all threads:\n");

    for (i = 0; i < n_capabilities; i++) {
        cap = capabilities[i];
        debugBelch("threads on capability %d:\n", cap->no);
        for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
            printThreadStatus(t);
        }
    }

    debugBelch("other threads:\n");
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
            if (t->why_blocked != NotBlocked) {
                printThreadStatus(t);
            }
            next = t->global_link;
        }
    }
}

// useful from gdb
void
printThreadQueue(StgTSO *t)
{
    nat i = 0;
    for (; t != END_TSO_QUEUE; t = t->_link) {
        printThreadStatus(t);
        i++;
    }
    debugBelch("%d threads on queue\n", i);
}
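
/* Example (illustrative): with a DEBUG build of the RTS these helpers
 * can be called directly from gdb, e.g.
 *
 *     (gdb) call printAllThreads()
 *     (gdb) call printThreadQueue(capabilities[0]->run_queue_hd)
 *
 * capabilities[] and run_queue_hd are the scheduler globals/fields used
 * above; substitute whichever queue you want to inspect.
 */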

#endif /* DEBUG */