/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2006
 *
 * Thread-related functionality
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Capability.h"
#include "Updates.h"
#include "Threads.h"
#include "STM.h"
#include "Schedule.h"
#include "Trace.h"
#include "ThreadLabels.h"
#include "Messages.h"
#include "RaiseAsync.h"
#include "Prelude.h"
#include "Printer.h"
#include "sm/Sanity.h"
#include "sm/Storage.h"

#include <string.h>

/* Next thread ID to allocate.
 * LOCK: sched_mutex
 */
static StgThreadID next_thread_id = 1;

/* The smallest stack size that makes any sense is:
 *    RESERVED_STACK_WORDS     (so we can get back from the stack overflow)
 *  + sizeofW(StgStopFrame)    (the stg_stop_thread_info frame)
 *  + 1                        (the closure to enter)
 *  + 1                        (stg_ap_v_ret)
 *  + 1                        (spare slot req'd by stg_ap_v_ret)
 *
 * A thread with this stack will bomb immediately with a stack
 * overflow, which will increase its stack size.
 */
#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 3)

/* ---------------------------------------------------------------------------
   Create a new thread.

   The new thread starts with the given stack size.  Before the
   scheduler can run, however, this thread needs to have a closure
   (and possibly some arguments) pushed on its stack.  See
   pushClosure() in Schedule.h.

   createGenThread() and createIOThread() (in SchedAPI.h) are
   convenient packaged versions of this function.
   ------------------------------------------------------------------------ */
StgTSO *
createThread(Capability *cap, W_ size)
{
    StgTSO *tso;
    StgStack *stack;
    nat stack_size;

    /* sched_mutex is *not* required */

    /* catch ridiculously small stack sizes */
    if (size < MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO)) {
        size = MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO);
    }

    /* The size argument we are given includes all the per-thread
     * overheads:
     *
     *    - The TSO structure
     *    - The STACK header
     *
     * This is so that we can use a nice round power of 2 for the
     * default stack size (e.g. 1k), and if we're allocating lots of
     * threads back-to-back they'll fit nicely in a block.  It's a bit
     * of a benchmark hack, but it doesn't do any harm.
     */
    stack_size = round_to_mblocks(size - sizeofW(StgTSO));
    stack = (StgStack *)allocate(cap, stack_size);
    TICK_ALLOC_STACK(stack_size);
    SET_HDR(stack, &stg_STACK_info, cap->r.rCCCS);
    stack->stack_size = stack_size - sizeofW(StgStack);
    stack->sp = stack->stack + stack->stack_size;
    stack->dirty = 1;

    tso = (StgTSO *)allocate(cap, sizeofW(StgTSO));
    TICK_ALLOC_TSO();
    SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);

    // Always start with the compiled code evaluator
    tso->what_next = ThreadRunGHC;
    tso->why_blocked = NotBlocked;
    tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
    tso->bq = (StgBlockingQueue *)END_TSO_QUEUE;
    tso->flags = 0;
    tso->dirty = 1;
    tso->_link = END_TSO_QUEUE;

    tso->saved_errno = 0;
    tso->bound = NULL;
    tso->cap = cap;

    tso->stackobj = stack;
    tso->tot_stack_size = stack->stack_size;

    ASSIGN_Int64((W_*)&(tso->alloc_limit), 0);

    tso->trec = NO_TREC;

#ifdef PROFILING
    tso->prof.cccs = CCS_MAIN;
#endif

    // put a stop frame on the stack
    stack->sp -= sizeofW(StgStopFrame);
    SET_HDR((StgClosure*)stack->sp,
            (StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);

    /* Link the new thread on the global thread list.
     */
    ACQUIRE_LOCK(&sched_mutex);
    tso->id = next_thread_id++;  // while we have the mutex
    tso->global_link = g0->threads;
    g0->threads = tso;
    RELEASE_LOCK(&sched_mutex);

    // ToDo: report the stack size in the event?
    traceEventCreateThread(cap, tso);

    return tso;
}
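
/* Illustration (not part of the original source): a hedged sketch of
 * how a packaged wrapper such as createIOThread() might use
 * createThread() together with pushClosure() (Schedule.h) to set up the
 * initial stack.  The exact frames pushed here (stg_enter_info,
 * stg_ap_v_info) are assumptions made for the sketch; see SchedAPI.h
 * for the real declarations.
 *
 *   StgTSO *t = createThread(cap, stack_size);
 *   pushClosure(t, (W_)&stg_ap_v_info);   // apply the closure to a void arg
 *   pushClosure(t, (W_)closure);          // the IO action to run
 *   pushClosure(t, (W_)&stg_enter_info);  // enter the closure when scheduled
 *   return t;
 */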

/* ---------------------------------------------------------------------------
 * Comparing Thread ids.
 *
 * This is used from STG land in the implementation of the
 * instances of Eq/Ord for ThreadIds.
 * ------------------------------------------------------------------------ */

int
cmp_thread(StgPtr tso1, StgPtr tso2)
{
    StgThreadID id1 = ((StgTSO *)tso1)->id;
    StgThreadID id2 = ((StgTSO *)tso2)->id;

    if (id1 < id2) return (-1);
    if (id1 > id2) return 1;
    return 0;
}

/* ---------------------------------------------------------------------------
 * Fetching the ThreadID from an StgTSO.
 *
 * This is used in the implementation of Show for ThreadIds.
 * ------------------------------------------------------------------------ */
int
rts_getThreadId(StgPtr tso)
{
    return ((StgTSO *)tso)->id;
}

/* ---------------------------------------------------------------------------
 * Getting & setting the thread allocation limit
 * ------------------------------------------------------------------------ */
HsInt64 rts_getThreadAllocationCounter(StgPtr tso)
{
    // NB. doesn't take into account allocation in the current nursery
    // block, so it might be off by up to 4k.
    return PK_Int64((W_*)&(((StgTSO *)tso)->alloc_limit));
}

void rts_setThreadAllocationCounter(StgPtr tso, HsInt64 i)
{
    ASSIGN_Int64((W_*)&(((StgTSO *)tso)->alloc_limit), i);
}

void rts_enableThreadAllocationLimit(StgPtr tso)
{
    ((StgTSO *)tso)->flags |= TSO_ALLOC_LIMIT;
}

void rts_disableThreadAllocationLimit(StgPtr tso)
{
    ((StgTSO *)tso)->flags &= ~TSO_ALLOC_LIMIT;
}
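
/* Illustration (not part of the original source): a hedged sketch of
 * how these entry points are intended to combine.  The counter counts
 * down as the thread allocates; with TSO_ALLOC_LIMIT set, the RTS is
 * expected to raise an allocation-limit exception in the thread once
 * the counter goes negative.  The 1 MB budget below is made up for the
 * example:
 *
 *   rts_setThreadAllocationCounter(tso, 1024 * 1024);  // 1 MB budget
 *   rts_enableThreadAllocationLimit(tso);
 *   ...
 *   // later: remaining budget (may be off by up to 4k, see above)
 *   HsInt64 remaining = rts_getThreadAllocationCounter(tso);
 */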

/* -----------------------------------------------------------------------------
   Remove a thread from a queue.
   Fails fatally if the TSO is not on the queue.
   -------------------------------------------------------------------------- */

rtsBool // returns True if we modified the head of the queue
removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
{
    StgTSO *t, *prev;

    prev = NULL;
    for (t = *queue; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                t->_link = END_TSO_QUEUE;
                return rtsFalse;
            } else {
                *queue = t->_link;
                t->_link = END_TSO_QUEUE;
                return rtsTrue;
            }
        }
    }
    barf("removeThreadFromQueue: not found");
}

rtsBool // returns True if we modified head or tail
removeThreadFromDeQueue (Capability *cap,
                         StgTSO **head, StgTSO **tail, StgTSO *tso)
{
    StgTSO *t, *prev;
    rtsBool flag = rtsFalse;

    prev = NULL;
    for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
        if (t == tso) {
            if (prev) {
                setTSOLink(cap,prev,t->_link);
                flag = rtsFalse;
            } else {
                *head = t->_link;
                flag = rtsTrue;
            }
            t->_link = END_TSO_QUEUE;
            if (*tail == tso) {
                if (prev) {
                    *tail = prev;
                } else {
                    *tail = END_TSO_QUEUE;
                }
                return rtsTrue;
            } else {
                return flag;
            }
        }
    }
    barf("removeThreadFromDeQueue: not found");
}
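
/* Illustration (not part of the original source): a hedged sketch of
 * dropping a thread from a capability's run queue, which is kept as a
 * head/tail pair (run_queue_hd/run_queue_tl in the Capability struct):
 *
 *   if (removeThreadFromDeQueue(cap, &cap->run_queue_hd,
 *                               &cap->run_queue_tl, tso)) {
 *       // the head or tail pointer changed; refresh any cached view
 *       // of the queue ends in the caller
 *   }
 */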

/* ----------------------------------------------------------------------------
   tryWakeupThread()

   Attempt to wake up a thread.  tryWakeupThread is idempotent: it is
   always safe to call it too many times, but it is not safe in
   general to omit a call.

   ------------------------------------------------------------------------- */

void
tryWakeupThread (Capability *cap, StgTSO *tso)
{
    traceEventThreadWakeup (cap, tso, tso->cap->no);

#ifdef THREADED_RTS
    if (tso->cap != cap)
    {
        MessageWakeup *msg;
        msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
        SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
        msg->tso = tso;
        sendMessage(cap, tso->cap, (Message*)msg);
        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
                      (W_)tso->id, tso->cap->no);
        return;
    }
#endif

    switch (tso->why_blocked)
    {
    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
        if (tso->_link == END_TSO_QUEUE) {
            tso->block_info.closure = (StgClosure*)END_TSO_QUEUE;
            goto unblock;
        } else {
            return;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;

        i = lockClosure(tso->block_info.closure);
        unlockClosure(tso->block_info.closure, i);
        if (i != &stg_MSG_NULL_info) {
            debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
                          (W_)tso->id, tso->block_info.throwto->header.info);
            return;
        }

        // remove the block frame from the stack
        ASSERT(tso->stackobj->sp[0] == (StgWord)&stg_block_throwto_info);
        tso->stackobj->sp += 3;
        goto unblock;
    }

    case BlockedOnBlackHole:
    case BlockedOnSTM:
    case ThreadMigrating:
        goto unblock;

    default:
        // otherwise, do nothing
        return;
    }

unblock:
    // just run the thread now; if the BH is not really available,
    // we'll block again.
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap,tso);

    // We used to set the context switch flag here, which would
    // trigger a context switch a short time in the future (at the end
    // of the current nursery block).  The idea is that we have just
    // woken up a thread, so we may need to load-balance and migrate
    // threads to other CPUs.  On the other hand, setting the context
    // switch flag here unfairly penalises the current thread by
    // yielding its time slice too early.
    //
    // The synthetic benchmark nofib/smp/chan can be used to show the
    // difference quite clearly.

    // cap->context_switch = 1;
}

/* ----------------------------------------------------------------------------
   migrateThread
   ------------------------------------------------------------------------- */

void
migrateThread (Capability *from, StgTSO *tso, Capability *to)
{
    traceEventMigrateThread (from, tso, to->no);
    // ThreadMigrating tells the target cap that it needs to be added to
    // the run queue when it receives the MSG_TRY_WAKEUP.
    tso->why_blocked = ThreadMigrating;
    tso->cap = to;
    tryWakeupThread(from, tso);
}

/* ----------------------------------------------------------------------------
   wakeBlockingQueue

   Wakes up all the threads on the specified blocking queue.
   ------------------------------------------------------------------------- */

void
wakeBlockingQueue(Capability *cap, StgBlockingQueue *bq)
{
    MessageBlackHole *msg;
    const StgInfoTable *i;

    ASSERT(bq->header.info == &stg_BLOCKING_QUEUE_DIRTY_info ||
           bq->header.info == &stg_BLOCKING_QUEUE_CLEAN_info );

    for (msg = bq->queue; msg != (MessageBlackHole*)END_TSO_QUEUE;
         msg = msg->link) {
        i = msg->header.info;
        if (i != &stg_IND_info) {
            ASSERT(i == &stg_MSG_BLACKHOLE_info);
            tryWakeupThread(cap,msg->tso);
        }
    }

    // overwrite the BQ with an indirection so it will be
    // collected at the next GC.
#if defined(DEBUG) && !defined(THREADED_RTS)
    // XXX FILL_SLOP, but not if THREADED_RTS because in that case
    // another thread might be looking at this BLOCKING_QUEUE and
    // checking the owner field at the same time.
    bq->bh = 0; bq->queue = 0; bq->owner = 0;
#endif
    OVERWRITE_INFO(bq, &stg_IND_info);
}

// If we update a closure that we know we BLACKHOLE'd, and the closure
// no longer points to the current TSO as its owner, then there may be
// an orphaned BLOCKING_QUEUE closure with blocked threads attached to
// it.  We therefore traverse the BLOCKING_QUEUEs attached to the
// current TSO to see if any can now be woken up.
void
checkBlockingQueues (Capability *cap, StgTSO *tso)
{
    StgBlockingQueue *bq, *next;
    StgClosure *p;

    debugTraceCap(DEBUG_sched, cap,
                  "collision occurred; checking blocking queues for thread %ld",
                  (W_)tso->id);

    for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {
        next = bq->link;

        if (bq->header.info == &stg_IND_info) {
            // ToDo: could short it out right here, to avoid
            // traversing this IND multiple times.
            continue;
        }

        p = bq->bh;

        if (p->header.info != &stg_BLACKHOLE_info ||
            ((StgInd *)p)->indirectee != (StgClosure*)bq)
        {
            wakeBlockingQueue(cap,bq);
        }
    }
}

/* ----------------------------------------------------------------------------
   updateThunk

   Update a thunk with a value.  In order to do this, we need to know
   which TSO owns (or is evaluating) the thunk, in case we need to
   awaken any threads that are blocked on it.
   ------------------------------------------------------------------------- */

void
updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
{
    StgClosure *v;
    StgTSO *owner;
    const StgInfoTable *i;

    i = thunk->header.info;
    if (i != &stg_BLACKHOLE_info &&
        i != &stg_CAF_BLACKHOLE_info &&
        i != &__stg_EAGER_BLACKHOLE_info &&
        i != &stg_WHITEHOLE_info) {
        updateWithIndirection(cap, thunk, val);
        return;
    }

    v = ((StgInd*)thunk)->indirectee;

    updateWithIndirection(cap, thunk, val);

    // sometimes the TSO is locked when we reach here, so its header
    // might be WHITEHOLE.  Hence check for the correct owner using
    // pointer equality first.
    if ((StgTSO*)v == tso) {
        return;
    }

    i = v->header.info;
    if (i == &stg_TSO_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    if (i != &stg_BLOCKING_QUEUE_CLEAN_info &&
        i != &stg_BLOCKING_QUEUE_DIRTY_info) {
        checkBlockingQueues(cap, tso);
        return;
    }

    owner = ((StgBlockingQueue*)v)->owner;

    if (owner != tso) {
        checkBlockingQueues(cap, tso);
    } else {
        wakeBlockingQueue(cap, (StgBlockingQueue*)v);
    }
}

/* ---------------------------------------------------------------------------
 * rtsSupportsBoundThreads(): is the RTS built to support bound threads?
 * used by Control.Concurrent for error checking.
 * ------------------------------------------------------------------------- */

HsBool
rtsSupportsBoundThreads(void)
{
#if defined(THREADED_RTS)
    return HS_BOOL_TRUE;
#else
    return HS_BOOL_FALSE;
#endif
}

/* ---------------------------------------------------------------------------
 * isThreadBound(tso): check whether tso is bound to an OS thread.
 * ------------------------------------------------------------------------- */

StgBool
isThreadBound(StgTSO* tso USED_IF_THREADS)
{
#if defined(THREADED_RTS)
    return (tso->bound != NULL);
#endif
    return rtsFalse;
}

/* -----------------------------------------------------------------------------
   Stack overflow

   If the thread has reached its maximum stack size, then raise the
   StackOverflow exception in the offending thread.  Otherwise
   relocate the TSO into a larger chunk of memory and adjust its stack
   size appropriately.
   -------------------------------------------------------------------------- */

void
threadStackOverflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    W_ chunk_size;

    IF_DEBUG(sanity,checkTSO(tso));

    if (RtsFlags.GcFlags.maxStkSize > 0
        && tso->tot_stack_size >= RtsFlags.GcFlags.maxStkSize) {
        // #3677: In a stack overflow situation, stack squeezing may
        // reduce the stack size, but we don't know whether it has been
        // reduced enough for the stack check to succeed if we try
        // again.  Fortunately stack squeezing is idempotent, so all we
        // need to do is record whether *any* squeezing happened.  If we
        // are at the stack's absolute -K limit, and stack squeezing
        // happened, then we try running the thread again.  The
        // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
        // squeezing happened or not.
        if (tso->flags & TSO_SQUEEZED) {
            return;
        }

        debugTrace(DEBUG_gc,
                   "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
                   (long)tso->id, tso, (long)tso->stackobj->stack_size,
                   RtsFlags.GcFlags.maxStkSize);
        IF_DEBUG(gc,
                 /* If we're debugging, just print out the top of the stack */
                 printStackChunk(tso->stackobj->sp,
                                 stg_min(tso->stackobj->stack + tso->stackobj->stack_size,
                                         tso->stackobj->sp+64)));

        // Note [Throw to self when masked], also #767 and #8303.
        throwToSelf(cap, tso, (StgClosure *)stackOverflow_closure);
        return;
    }


    // We also want to avoid enlarging the stack if squeezing has
    // already released some of it.  However, we don't want to get into
    // a pathological situation where a thread has a nearly full stack
    // (near its current limit, but not near the absolute -K limit),
    // keeps allocating a little bit, squeezing removes a little bit,
    // and then it runs again.  So to avoid this, if we squeezed *and*
    // there is still less than BLOCK_SIZE_W words free, then we enlarge
    // the stack anyway.
    //
    // NB: This reasoning only applies if the stack has been squeezed;
    // if no squeezing has occurred, then BLOCK_SIZE_W free space does
    // not mean there is enough stack to run; the thread may have
    // requested a large amount of stack (see below).  If the amount
    // we squeezed is not enough to run the thread, we'll come back
    // here (no squeezing will have occurred and thus we'll enlarge the
    // stack.)
    if ((tso->flags & TSO_SQUEEZED) &&
        ((W_)(tso->stackobj->sp - tso->stackobj->stack) >= BLOCK_SIZE_W)) {
        return;
    }

    old_stack = tso->stackobj;

    // If we used less than half of the previous stack chunk, then we
    // must have failed a stack check for a large amount of stack.  In
    // this case we allocate a double-sized chunk to try to
    // accommodate the large stack request.  If that also fails, the
    // next chunk will be 4x normal size, and so on.
    //
    // It would be better to have the mutator tell us how much stack
    // was needed, as we do with heap allocations, but this works for
    // now.
    //
    if (old_stack->sp > old_stack->stack + old_stack->stack_size / 2)
    {
        chunk_size = stg_max(2 * (old_stack->stack_size + sizeofW(StgStack)),
                             RtsFlags.GcFlags.stkChunkSize);
    }
    else
    {
        chunk_size = RtsFlags.GcFlags.stkChunkSize;
    }
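
    /* Illustration (not part of the original source): a worked example
     * of the chunk-size choice above.  If less than half of the old
     * chunk was in use (sp still in its upper half), the thread must
     * have asked for a large amount of stack in one go, so we double:
     *
     *   chunk_size = stg_max(2 * (old_stack->stack_size + sizeofW(StgStack)),
     *                        RtsFlags.GcFlags.stkChunkSize);
     *
     * With the default -kc32k chunk size, repeated failures give chunks
     * of roughly 32k, 64k, 128k, ... bytes (ignoring headers), matching
     * the "double-sized, then 4x normal size" description in the
     * comment above.  Otherwise we fall back to the standard chunk size
     * in RtsFlags.GcFlags.stkChunkSize.
     */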

    debugTraceCap(DEBUG_sched, cap,
                  "allocating new stack chunk of size %d bytes",
                  chunk_size * sizeof(W_));

    new_stack = (StgStack*) allocate(cap, chunk_size);
    SET_HDR(new_stack, &stg_STACK_info, old_stack->header.prof.ccs);
    TICK_ALLOC_STACK(chunk_size);

    new_stack->dirty = 0; // begin clean, we'll mark it dirty below
    new_stack->stack_size = chunk_size - sizeofW(StgStack);
    new_stack->sp = new_stack->stack + new_stack->stack_size;

    tso->tot_stack_size += new_stack->stack_size;

    {
        StgWord *sp;
        W_ chunk_words, size;

        // find the boundary of the chunk of old stack we're going to
        // copy to the new stack.  We skip over stack frames until we
        // reach the smaller of
        //
        //    * the chunk buffer size (+RTS -kb)
        //    * the end of the old stack
        //
        for (sp = old_stack->sp;
             sp < stg_min(old_stack->sp + RtsFlags.GcFlags.stkChunkBufferSize,
                          old_stack->stack + old_stack->stack_size); )
        {
            size = stack_frame_sizeW((StgClosure*)sp);

            // if including this frame would exceed the size of the
            // new stack (taking into account the underflow frame),
            // then stop at the previous frame.
            if (sp + size > old_stack->stack + (new_stack->stack_size -
                                                sizeofW(StgUnderflowFrame))) {
                break;
            }
            sp += size;
        }

        if (sp == old_stack->stack + old_stack->stack_size) {
            //
            // the old stack chunk is now empty, so we do *not* insert
            // an underflow frame pointing back to it.  There are two
            // cases: either the old stack chunk was the last one, in
            // which case it ends with a STOP_FRAME, or it is not the
            // last one, and it already ends with an UNDERFLOW_FRAME
            // pointing to the previous chunk.  In the latter case, we
            // will copy the UNDERFLOW_FRAME into the new stack chunk.
            // In both cases, the old chunk will be subsequently GC'd.
            //
            // With the default settings, -ki1k -kb1k, this means the
            // first stack chunk will be discarded after the first
            // overflow, being replaced by a non-moving 32k chunk.
            //
        } else {
            new_stack->sp -= sizeofW(StgUnderflowFrame);
            frame = (StgUnderflowFrame*)new_stack->sp;
            frame->info = &stg_stack_underflow_frame_info;
            frame->next_chunk = old_stack;
        }

        // copy the stack chunk between tso->sp and sp to
        //   new_tso->sp + (tso->sp - sp)
        chunk_words = sp - old_stack->sp;

        memcpy(/* dest   */ new_stack->sp - chunk_words,
               /* source */ old_stack->sp,
               /* size   */ chunk_words * sizeof(W_));

        old_stack->sp += chunk_words;
        new_stack->sp -= chunk_words;
    }

    tso->stackobj = new_stack;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    IF_DEBUG(sanity,checkTSO(tso));
    // IF_DEBUG(scheduler,printTSO(new_tso));
}


/* ---------------------------------------------------------------------------
   Stack underflow - called from the stg_stack_underflow_frame_info frame
   ------------------------------------------------------------------------ */

W_ // returns offset to the return address
threadStackUnderflow (Capability *cap, StgTSO *tso)
{
    StgStack *new_stack, *old_stack;
    StgUnderflowFrame *frame;
    nat retvals;

    debugTraceCap(DEBUG_sched, cap, "stack underflow");

    old_stack = tso->stackobj;

    frame = (StgUnderflowFrame*)(old_stack->stack + old_stack->stack_size
                                 - sizeofW(StgUnderflowFrame));
    ASSERT(frame->info == &stg_stack_underflow_frame_info);

    new_stack = (StgStack*)frame->next_chunk;
    tso->stackobj = new_stack;

    retvals = (P_)frame - old_stack->sp;
    if (retvals != 0)
    {
        // we have some return values to copy to the old stack
        if ((W_)(new_stack->sp - new_stack->stack) < retvals)
        {
            barf("threadStackUnderflow: not enough space for return values");
        }

        new_stack->sp -= retvals;

        memcpy(/* dest */ new_stack->sp,
               /* src  */ old_stack->sp,
               /* size */ retvals * sizeof(W_));
    }

    // empty the old stack.  The GC may still visit this object
    // because it is on the mutable list.
    old_stack->sp = old_stack->stack + old_stack->stack_size;

    // restore the stack parameters, and update tot_stack_size
    tso->tot_stack_size -= old_stack->stack_size;

    // we're about to run it, better mark it dirty
    dirty_STACK(cap, new_stack);

    return retvals;
}

/* ----------------------------------------------------------------------------
 * Debugging: why is a thread blocked
 * ------------------------------------------------------------------------- */

#if DEBUG
void
printThreadBlockage(StgTSO *tso)
{
    switch (tso->why_blocked) {
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
        debugBelch("is blocked on proc (request: %u)", tso->block_info.async_result->reqID);
        break;
#endif
#if !defined(THREADED_RTS)
    case BlockedOnRead:
        debugBelch("is blocked on read from fd %d", (int)(tso->block_info.fd));
        break;
    case BlockedOnWrite:
        debugBelch("is blocked on write to fd %d", (int)(tso->block_info.fd));
        break;
    case BlockedOnDelay:
        debugBelch("is blocked until %ld", (long)(tso->block_info.target));
        break;
#endif
    case BlockedOnMVar:
        debugBelch("is blocked on an MVar @ %p", tso->block_info.closure);
        break;
    case BlockedOnMVarRead:
        debugBelch("is blocked on atomic MVar read @ %p", tso->block_info.closure);
        break;
    case BlockedOnBlackHole:
        debugBelch("is blocked on a black hole %p",
                   ((StgBlockingQueue*)tso->block_info.bh->bh));
        break;
    case BlockedOnMsgThrowTo:
        debugBelch("is blocked on a throwto message");
        break;
    case NotBlocked:
        debugBelch("is not blocked");
        break;
    case ThreadMigrating:
        debugBelch("is runnable, but not on the run queue");
        break;
    case BlockedOnCCall:
        debugBelch("is blocked on an external call");
        break;
    case BlockedOnCCall_Interruptible:
        debugBelch("is blocked on an external call (but may be interrupted)");
        break;
    case BlockedOnSTM:
        debugBelch("is blocked on an STM operation");
        break;
    default:
        barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%d)",
             tso->why_blocked, tso->id, tso);
    }
}


void
printThreadStatus(StgTSO *t)
{
    debugBelch("\tthread %4lu @ %p ", (unsigned long)t->id, (void *)t);
    {
        void *label = lookupThreadLabel(t->id);
        if (label) debugBelch("[\"%s\"] ",(char *)label);
    }
    switch (t->what_next) {
    case ThreadKilled:
        debugBelch("has been killed");
        break;
    case ThreadComplete:
        debugBelch("has completed");
        break;
    default:
        printThreadBlockage(t);
    }
    if (t->dirty) {
        debugBelch(" (TSO_DIRTY)");
    }
    debugBelch("\n");
}

void
printAllThreads(void)
{
    StgTSO *t, *next;
    nat i, g;
    Capability *cap;

    debugBelch("all threads:\n");

    for (i = 0; i < n_capabilities; i++) {
        cap = capabilities[i];
        debugBelch("threads on capability %d:\n", cap->no);
        for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
            printThreadStatus(t);
        }
    }

    debugBelch("other threads:\n");
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
            if (t->why_blocked != NotBlocked) {
                printThreadStatus(t);
            }
            next = t->global_link;
        }
    }
}

// useful from gdb
void
printThreadQueue(StgTSO *t)
{
    nat i = 0;
    for (; t != END_TSO_QUEUE; t = t->_link) {
        printThreadStatus(t);
        i++;
    }
    debugBelch("%d threads on queue\n", i);
}
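
/* Illustration (not part of the original source): from gdb these
 * helpers can be called directly on a stopped, DEBUG-built RTS, e.g.
 * assuming `cap` is a Capability* visible in the current frame:
 *
 *   (gdb) call printAllThreads()
 *   (gdb) call printThreadQueue(cap->run_queue_hd)
 */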

#endif /* DEBUG */