1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2006
4 *
5 * Sanity checking code for the heap and stack.
6 *
 7 * Used when debugging: check that everything is reasonable.
8 *
9 * - All things that are supposed to be pointers look like pointers.
10 *
 11 * - Objects in text space are marked as static closures; those
 12 * in the heap are dynamic.
13 *
14 * ---------------------------------------------------------------------------*/
15
16 #include "PosixSource.h"
17 #include "Rts.h"
18
19 #ifdef DEBUG /* whole file */
20
21 #include "RtsFlags.h"
22 #include "RtsUtils.h"
23 #include "BlockAlloc.h"
24 #include "Sanity.h"
25 #include "MBlock.h"
26 #include "Storage.h"
27 #include "Schedule.h"
28 #include "Apply.h"
29
30 /* -----------------------------------------------------------------------------
31 Forward decls.
32 -------------------------------------------------------------------------- */
33
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
37
38 /* -----------------------------------------------------------------------------
39 Check stack sanity
40 -------------------------------------------------------------------------- */
41
42 static void
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
44 {
45 StgPtr p;
46 nat i;
47
48 p = payload;
49 for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 if ((bitmap & 1) == 0) {
51 checkClosureShallow((StgClosure *)payload[i]);
52 }
53 }
54 }
55
56 static void
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
58 {
59 StgWord bmp;
60 nat i, j;
61
62 i = 0;
63 for (bmp=0; i < size; bmp++) {
64 StgWord bitmap = large_bitmap->bitmap[bmp];
65 j = 0;
66 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 if ((bitmap & 1) == 0) {
68 checkClosureShallow((StgClosure *)payload[i]);
69 }
70 }
71 }
72 }
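/*
 * Worked example of the bitmap convention used by the two checkers above
 * (a sketch for illustration only; 'frame_payload' is a hypothetical
 * pointer to a frame's payload, not a name used elsewhere in the RTS):
 * a clear bit means the corresponding payload word is a pointer and gets
 * checked, a set bit means it is a non-pointer and is skipped.
 */
#if 0
   /* A frame with three payload words and bitmap 0x2 (binary 010):
    *   word 0: bit 0 clear -> pointer, checkClosureShallow() runs on it
    *   word 1: bit 1 set   -> non-pointer (e.g. an unboxed word), skipped
    *   word 2: bit 2 clear -> pointer, checkClosureShallow() runs on it
    */
   checkSmallBitmap(frame_payload, 0x2, 3);
#endif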
73
 74 /*
 75  * Check that it looks like a valid closure, without checking its payload.
 76  * Used to avoid recursion between checking PAPs and checking stack
 77  * chunks.
 78  */
79
80 static void
81 checkClosureShallow( StgClosure* p )
82 {
83 StgClosure *q;
84
85 q = UNTAG_CLOSURE(p);
86 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
87
88 /* Is it a static closure? */
89 if (!HEAP_ALLOCED(q)) {
90 ASSERT(closure_STATIC(q));
91 } else {
92 ASSERT(!closure_STATIC(q));
93 }
94 }
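/*
 * Because the argument is untagged first, callers can hand this function
 * raw words lifted straight off a stack frame or payload, whether or not
 * they carry pointer-tag bits.  A sketch (names are hypothetical):
 */
#if 0
   /* 'frame_payload' and 'i' are hypothetical; the word may carry tag bits */
   checkClosureShallow((StgClosure *)frame_payload[i]);
#endif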
95
96 // check an individual stack object
97 StgOffset
98 checkStackFrame( StgPtr c )
99 {
100 nat size;
101 const StgRetInfoTable* info;
102
103 info = get_ret_itbl((StgClosure *)c);
104
105 /* All activation records have 'bitmap' style layout info. */
106 switch (info->i.type) {
107 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
108 {
109 StgWord dyn;
110 StgPtr p;
111 StgRetDyn* r;
112
113 r = (StgRetDyn *)c;
114 dyn = r->liveness;
115
116 p = (P_)(r->payload);
117 checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
118 p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
119
120 // skip over the non-pointers
121 p += RET_DYN_NONPTRS(dyn);
122
123 // follow the ptr words
124 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
125 checkClosureShallow((StgClosure *)*p);
126 p++;
127 }
128
129 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
130 RET_DYN_NONPTR_REGS_SIZE +
131 RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
132 }
133
134 case UPDATE_FRAME:
135 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
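	/* fall through: after checking the updatee, an update frame is
	   laid out and checked like the small-bitmap frames below */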
136 case ATOMICALLY_FRAME:
137 case CATCH_RETRY_FRAME:
138 case CATCH_STM_FRAME:
139 case CATCH_FRAME:
140 // small bitmap cases (<= 32 entries)
141 case STOP_FRAME:
142 case RET_SMALL:
143 size = BITMAP_SIZE(info->i.layout.bitmap);
144 checkSmallBitmap((StgPtr)c + 1,
145 BITMAP_BITS(info->i.layout.bitmap), size);
146 return 1 + size;
147
148 case RET_BCO: {
149 StgBCO *bco;
150 nat size;
151 bco = (StgBCO *)*(c+1);
152 size = BCO_BITMAP_SIZE(bco);
153 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
154 return 2 + size;
155 }
156
157 case RET_BIG: // large bitmap (> 32 entries)
158 size = GET_LARGE_BITMAP(&info->i)->size;
159 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
160 return 1 + size;
161
162 case RET_FUN:
163 {
164 StgFunInfoTable *fun_info;
165 StgRetFun *ret_fun;
166
167 ret_fun = (StgRetFun *)c;
168 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
169 size = ret_fun->size;
170 switch (fun_info->f.fun_type) {
171 case ARG_GEN:
172 checkSmallBitmap((StgPtr)ret_fun->payload,
173 BITMAP_BITS(fun_info->f.b.bitmap), size);
174 break;
175 case ARG_GEN_BIG:
176 checkLargeBitmap((StgPtr)ret_fun->payload,
177 GET_FUN_LARGE_BITMAP(fun_info), size);
178 break;
179 default:
180 checkSmallBitmap((StgPtr)ret_fun->payload,
181 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
182 size);
183 break;
184 }
185 return sizeofW(StgRetFun) + size;
186 }
187
188 default:
189 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
190 }
191 }
192
193 // check sections of stack between update frames
194 void
195 checkStackChunk( StgPtr sp, StgPtr stack_end )
196 {
197 StgPtr p;
198
199 p = sp;
200 while (p < stack_end) {
201 p += checkStackFrame( p );
202 }
203 // ASSERT( p == stack_end ); -- HWL
204 }
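/*
 * The usual driver for this walk is a whole TSO stack: checkTSO() below
 * makes exactly this call once it has validated the thread itself
 * (sketch only, assuming 'tso' is a live, non-relocated thread):
 */
#if 0
   checkStackChunk(tso->sp, tso->stack + tso->stack_size);
#endif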
205
206 static void
207 checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
208 {
209 StgClosure *fun;
210 StgClosure *p;
211 StgFunInfoTable *fun_info;
212
213 fun = UNTAG_CLOSURE(tagged_fun);
214 ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
215 fun_info = get_fun_itbl(fun);
216
217 p = (StgClosure *)payload;
218 switch (fun_info->f.fun_type) {
219 case ARG_GEN:
220 checkSmallBitmap( (StgPtr)payload,
221 BITMAP_BITS(fun_info->f.b.bitmap), n_args );
222 break;
223 case ARG_GEN_BIG:
224 checkLargeBitmap( (StgPtr)payload,
225 GET_FUN_LARGE_BITMAP(fun_info),
226 n_args );
227 break;
228 case ARG_BCO:
229 checkLargeBitmap( (StgPtr)payload,
230 BCO_BITMAP(fun),
231 n_args );
232 break;
233 default:
234 checkSmallBitmap( (StgPtr)payload,
235 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
236 n_args );
237 break;
238 }
239
240 ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 1
241 : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
242 }
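/*
 * The final assertion above restates the pointer-tagging invariant checked
 * for PAPs and APs: when the function's arity fits in the tag bits
 * (arity <= TAG_MASK) the fun pointer carries the arity as its tag,
 * otherwise it carries tag 1.  A sketch for a hypothetical closure 'f2'
 * of arity 2, on a target whose TAG_MASK is at least 2:
 */
#if 0
   ASSERT(GET_CLOSURE_TAG(f2) == get_fun_itbl(UNTAG_CLOSURE(f2))->f.arity);
#endif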
243
244
245 StgOffset
246 checkClosure( StgClosure* p )
247 {
248 const StgInfoTable *info;
249
250 ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
251
252 p = UNTAG_CLOSURE(p);
253 /* Is it a static closure (i.e. in the data segment)? */
254 if (!HEAP_ALLOCED(p)) {
255 ASSERT(closure_STATIC(p));
256 } else {
257 ASSERT(!closure_STATIC(p));
258 }
259
260 info = p->header.info;
261
262 if (IS_FORWARDING_PTR(info)) {
263 barf("checkClosure: found EVACUATED closure %d", info->type);
264 }
265 info = INFO_PTR_TO_STRUCT(info);
266
267 switch (info->type) {
268
269 case MVAR_CLEAN:
270 case MVAR_DIRTY:
271 {
272 StgMVar *mvar = (StgMVar *)p;
273 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
274 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
275 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
276 #if 0
277 #if defined(PAR)
278 checkBQ((StgBlockingQueueElement *)mvar->head, p);
279 #else
280 checkBQ(mvar->head, p);
281 #endif
282 #endif
283 return sizeofW(StgMVar);
284 }
285
286 case THUNK:
287 case THUNK_1_0:
288 case THUNK_0_1:
289 case THUNK_1_1:
290 case THUNK_0_2:
291 case THUNK_2_0:
292 {
293 nat i;
294 for (i = 0; i < info->layout.payload.ptrs; i++) {
295 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
296 }
297 return thunk_sizeW_fromITBL(info);
298 }
299
300 case FUN:
301 case FUN_1_0:
302 case FUN_0_1:
303 case FUN_1_1:
304 case FUN_0_2:
305 case FUN_2_0:
306 case CONSTR:
307 case CONSTR_1_0:
308 case CONSTR_0_1:
309 case CONSTR_1_1:
310 case CONSTR_0_2:
311 case CONSTR_2_0:
312 case IND_PERM:
313 case IND_OLDGEN:
314 case IND_OLDGEN_PERM:
315 #ifdef TICKY_TICKY
316 case SE_BLACKHOLE:
317 case SE_CAF_BLACKHOLE:
318 #endif
319 case BLACKHOLE:
320 case CAF_BLACKHOLE:
321 case STABLE_NAME:
322 case MUT_VAR_CLEAN:
323 case MUT_VAR_DIRTY:
324 case CONSTR_STATIC:
325 case CONSTR_NOCAF_STATIC:
326 case THUNK_STATIC:
327 case FUN_STATIC:
328 {
329 nat i;
330 for (i = 0; i < info->layout.payload.ptrs; i++) {
331 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
332 }
333 return sizeW_fromITBL(info);
334 }
335
336 case BCO: {
337 StgBCO *bco = (StgBCO *)p;
338 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
339 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
340 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
341 return bco_sizeW(bco);
342 }
343
344 case IND_STATIC: /* (1, 0) closure */
345 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
346 return sizeW_fromITBL(info);
347
348 case WEAK:
349 /* deal with these specially - the info table isn't
350 * representative of the actual layout.
351 */
352 { StgWeak *w = (StgWeak *)p;
353 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
354 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
355 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
356 if (w->link) {
357 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
358 }
359 return sizeW_fromITBL(info);
360 }
361
362 case THUNK_SELECTOR:
363 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
364 return THUNK_SELECTOR_sizeW();
365
366 case IND:
367 {
368 /* we don't expect to see any of these after GC
369 * but they might appear during execution
370 */
371 StgInd *ind = (StgInd *)p;
372 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
373 return sizeofW(StgInd);
374 }
375
376 case RET_BCO:
377 case RET_SMALL:
378 case RET_BIG:
379 case RET_DYN:
380 case UPDATE_FRAME:
381 case STOP_FRAME:
382 case CATCH_FRAME:
383 case ATOMICALLY_FRAME:
384 case CATCH_RETRY_FRAME:
385 case CATCH_STM_FRAME:
386 barf("checkClosure: stack frame");
387
388 case AP:
389 {
390 StgAP* ap = (StgAP *)p;
391 checkPAP (ap->fun, ap->payload, ap->n_args);
392 return ap_sizeW(ap);
393 }
394
395 case PAP:
396 {
397 StgPAP* pap = (StgPAP *)p;
398 checkPAP (pap->fun, pap->payload, pap->n_args);
399 return pap_sizeW(pap);
400 }
401
402 case AP_STACK:
403 {
404 StgAP_STACK *ap = (StgAP_STACK *)p;
405 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
406 checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
407 return ap_stack_sizeW(ap);
408 }
409
410 case ARR_WORDS:
411 return arr_words_sizeW((StgArrWords *)p);
412
413 case MUT_ARR_PTRS_CLEAN:
414 case MUT_ARR_PTRS_DIRTY:
415 case MUT_ARR_PTRS_FROZEN:
416 case MUT_ARR_PTRS_FROZEN0:
417 {
418 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
419 nat i;
420 for (i = 0; i < a->ptrs; i++) {
421 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
422 }
423 return mut_arr_ptrs_sizeW(a);
424 }
425
426 case TSO:
427 checkTSO((StgTSO *)p);
428 return tso_sizeW((StgTSO *)p);
429
430 #if defined(PAR)
431
432 case BLOCKED_FETCH:
433 ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
434 ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
435 return sizeofW(StgBlockedFetch); // see size used in evacuate()
436
437 #ifdef DIST
438 case REMOTE_REF:
439 return sizeofW(StgFetchMe);
440 #endif /*DIST */
441
442 case FETCH_ME:
443 ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
444 return sizeofW(StgFetchMe); // see size used in evacuate()
445
446 case FETCH_ME_BQ:
447 checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
448 return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
449
450 case RBH:
451 /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
452 ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
453 if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
454 checkBQ(((StgRBH *)p)->blocking_queue, p);
455 ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
456 return BLACKHOLE_sizeW(); // see size used in evacuate()
457 // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
458
459 #endif
460
461 case TVAR_WATCH_QUEUE:
462 {
463 StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
464 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
465 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
466 return sizeofW(StgTVarWatchQueue);
467 }
468
469 case INVARIANT_CHECK_QUEUE:
470 {
471 StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
472 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
473 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
474 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
475 return sizeofW(StgInvariantCheckQueue);
476 }
477
478 case ATOMIC_INVARIANT:
479 {
480 StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
481 ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
482 ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
483 return sizeofW(StgAtomicInvariant);
484 }
485
486 case TVAR:
487 {
488 StgTVar *tv = (StgTVar *)p;
489 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
490 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
491 return sizeofW(StgTVar);
492 }
493
494 case TREC_CHUNK:
495 {
496 nat i;
497 StgTRecChunk *tc = (StgTRecChunk *)p;
498 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
499 for (i = 0; i < tc -> next_entry_idx; i ++) {
500 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
501 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
502 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
503 }
504 return sizeofW(StgTRecChunk);
505 }
506
507 case TREC_HEADER:
508 {
509 StgTRecHeader *trec = (StgTRecHeader *)p;
510 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
511 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
512 return sizeofW(StgTRecHeader);
513 }
514
515 default:
516 barf("checkClosure (closure type %d)", info->type);
517 }
518 }
519
520 #if defined(PAR)
521
522 #define PVM_PE_MASK 0xfffc0000
523 #define MAX_PVM_PES MAX_PES
524 #define MAX_PVM_TIDS MAX_PES
525 #define MAX_SLOTS 100000
526
527 rtsBool
528 looks_like_tid(StgInt tid)
529 {
530 StgInt hi = (tid & PVM_PE_MASK) >> 18;
531 StgInt lo = (tid & ~PVM_PE_MASK);
532 rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
533 return ok;
534 }
535
536 rtsBool
537 looks_like_slot(StgInt slot)
538 {
 539 /* if the tid is known, better to use looks_like_ga! */
540 rtsBool ok = slot<MAX_SLOTS;
541 // This refers only to the no. of slots on the current PE
542 // rtsBool ok = slot<=highest_slot();
543 return ok;
544 }
545
546 rtsBool
547 looks_like_ga(globalAddr *ga)
548 {
549 rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
550 rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
551 (ga)->payload.gc.slot<=highest_slot() :
552 (ga)->payload.gc.slot<MAX_SLOTS;
553 rtsBool ok = is_tid && is_slot;
554 return ok;
555 }
556
557 #endif
558
559
560 /* -----------------------------------------------------------------------------
561 Check Heap Sanity
562
563 After garbage collection, the live heap is in a state where we can
564 run through and check that all the pointers point to the right
565 place. This function starts at a given position and sanity-checks
566 all the objects in the remainder of the chain.
567 -------------------------------------------------------------------------- */
568
569 void
570 checkHeap(bdescr *bd)
571 {
572 StgPtr p;
573
574 #if defined(THREADED_RTS)
575 // heap sanity checking doesn't work with SMP, because we can't
576 // zero the slop (see Updates.h).
577 return;
578 #endif
579
580 for (; bd != NULL; bd = bd->link) {
581 p = bd->start;
582 while (p < bd->free) {
583 nat size = checkClosure((StgClosure *)p);
584 /* This is the smallest size of closure that can live in the heap */
585 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
586 p += size;
587
588 /* skip over slop */
589 while (p < bd->free &&
590 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
591 }
592 }
593 }
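/*
 * checkHeap() takes the head of a block chain.  A debugging hook might
 * drive it over every step of the storage manager, e.g. (sketch only;
 * 'total_steps' and 'all_steps' are used by checkGlobalTSOList() later in
 * this file, but the 'blocks' field is assumed here for illustration):
 */
#if 0
   {
       nat s;
       for (s = 0; s < total_steps; s++) {
           checkHeap(all_steps[s].blocks);
       }
   }
#endif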
594
595 #if defined(PAR)
596 /*
597 Check heap between start and end. Used after unpacking graphs.
598 */
599 void
600 checkHeapChunk(StgPtr start, StgPtr end)
601 {
602 extern globalAddr *LAGAlookup(StgClosure *addr);
603 StgPtr p;
604 nat size;
605
606 for (p=start; p<end; p+=size) {
607 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
608 if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
609 *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
610 /* if it's a FM created during unpack and commoned up, it's not global */
611 ASSERT(LAGAlookup((StgClosure*)p)==NULL);
612 size = sizeofW(StgFetchMe);
613 } else if (get_itbl((StgClosure*)p)->type == IND) {
614 *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
615 size = sizeofW(StgInd);
616 } else {
617 size = checkClosure((StgClosure *)p);
618 /* This is the smallest size of closure that can live in the heap. */
619 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
620 }
621 }
622 }
623 #else /* !PAR */
624 void
625 checkHeapChunk(StgPtr start, StgPtr end)
626 {
627 StgPtr p;
628 nat size;
629
630 for (p=start; p<end; p+=size) {
631 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
632 size = checkClosure((StgClosure *)p);
633 /* This is the smallest size of closure that can live in the heap. */
634 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
635 }
636 }
637 #endif
638
639 void
640 checkChain(bdescr *bd)
641 {
642 while (bd != NULL) {
643 checkClosure((StgClosure *)bd->start);
644 bd = bd->link;
645 }
646 }
647
648 void
649 checkTSO(StgTSO *tso)
650 {
651 StgPtr sp = tso->sp;
652 StgPtr stack = tso->stack;
653 StgOffset stack_size = tso->stack_size;
654 StgPtr stack_end = stack + stack_size;
655
656 if (tso->what_next == ThreadRelocated) {
657 checkTSO(tso->_link);
658 return;
659 }
660
661 if (tso->what_next == ThreadKilled) {
662 /* The garbage collector doesn't bother following any pointers
663 * from dead threads, so don't check sanity here.
664 */
665 return;
666 }
667
668 ASSERT(stack <= sp && sp < stack_end);
669
670 #if defined(PAR)
671 ASSERT(tso->par.magic==TSO_MAGIC);
672
673 switch (tso->why_blocked) {
674 case BlockedOnGA:
675 checkClosureShallow(tso->block_info.closure);
676 ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
677 get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
678 break;
679 case BlockedOnGA_NoSend:
680 checkClosureShallow(tso->block_info.closure);
681 ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
682 break;
683 case BlockedOnBlackHole:
684 checkClosureShallow(tso->block_info.closure);
685 ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
686 get_itbl(tso->block_info.closure)->type==RBH);
687 break;
688 case BlockedOnRead:
689 case BlockedOnWrite:
690 case BlockedOnDelay:
691 #if defined(mingw32_HOST_OS)
692 case BlockedOnDoProc:
693 #endif
694 /* isOnBQ(blocked_queue) */
695 break;
696 case BlockedOnException:
697 /* isOnSomeBQ(tso) */
698 ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
699 break;
700 case BlockedOnMVar:
701 ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
702 break;
703 case BlockedOnSTM:
704 ASSERT(tso->block_info.closure == END_TSO_QUEUE);
705 break;
706 default:
707 /*
708 Could check other values of why_blocked but I am more
709 lazy than paranoid (bad combination) -- HWL
710 */
711 }
712
 713 /* if the link field is non-nil it must point to one of these
 714    three closure types */
715 ASSERT(tso->link == END_TSO_QUEUE ||
716 get_itbl(tso->link)->type == TSO ||
717 get_itbl(tso->link)->type == BLOCKED_FETCH ||
718 get_itbl(tso->link)->type == CONSTR);
719 #endif
720
721 checkStackChunk(sp, stack_end);
722 }
723
724 #if defined(GRAN)
725 void
726 checkTSOsSanity(void) {
727 nat i, tsos;
728 StgTSO *tso;
729
730 debugBelch("Checking sanity of all runnable TSOs:");
731
732 for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
733 for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
734 debugBelch("TSO %p on PE %d ...", tso, i);
735 checkTSO(tso);
736 debugBelch("OK, ");
737 tsos++;
738 }
739 }
740
741 debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
742 }
743
744
745 // still GRAN only
746
747 rtsBool
748 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
749 {
750 StgTSO *tso, *prev;
751
752 /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
753 ASSERT(run_queue_hds[proc]!=NULL);
754 ASSERT(run_queue_tls[proc]!=NULL);
755 /* if either head or tail is NIL then the other one must be NIL, too */
756 ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
757 ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
758 for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
759 tso!=END_TSO_QUEUE;
760 prev=tso, tso=tso->link) {
761 ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
762 (prev==END_TSO_QUEUE || prev->link==tso));
763 if (check_TSO_too)
764 checkTSO(tso);
765 }
766 ASSERT(prev==run_queue_tls[proc]);
767 }
768
769 rtsBool
770 checkThreadQsSanity (rtsBool check_TSO_too)
771 {
772 PEs p;
773
774 for (p=0; p<RtsFlags.GranFlags.proc; p++)
775 checkThreadQSanity(p, check_TSO_too);
776 }
777 #endif /* GRAN */
778
779 /*
780 Check that all TSOs have been evacuated.
781 Optionally also check the sanity of the TSOs.
782 */
783 void
784 checkGlobalTSOList (rtsBool checkTSOs)
785 {
786 StgTSO *tso;
787 nat s;
788
789 for (s = 0; s < total_steps; s++) {
790 for (tso=all_steps[s].threads; tso != END_TSO_QUEUE;
791 tso = tso->global_link) {
792 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
793 ASSERT(get_itbl(tso)->type == TSO);
794 if (checkTSOs)
795 checkTSO(tso);
796 }
797 }
798 }
799
800 /* -----------------------------------------------------------------------------
801 Check mutable list sanity.
802 -------------------------------------------------------------------------- */
803
804 void
805 checkMutableList( bdescr *mut_bd, nat gen )
806 {
807 bdescr *bd;
808 StgPtr q;
809 StgClosure *p;
810
811 for (bd = mut_bd; bd != NULL; bd = bd->link) {
812 for (q = bd->start; q < bd->free; q++) {
813 p = (StgClosure *)*q;
814 ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
815 }
816 }
817 }
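/*
 * Callers are expected to hand over a generation's mutable list together
 * with that generation's number, so the assertion above can confirm every
 * entry lives in the same generation.  Sketch (the 'generations' array and
 * its 'mut_list' field are assumed storage-manager names, not defined in
 * this file):
 */
#if 0
   checkMutableList(generations[g].mut_list, g);
#endif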
818
819 /*
820 Check the static objects list.
821 */
822 void
823 checkStaticObjects ( StgClosure* static_objects )
824 {
825 StgClosure *p = static_objects;
826 StgInfoTable *info;
827
828 while (p != END_OF_STATIC_LIST) {
829 checkClosure(p);
830 info = get_itbl(p);
831 switch (info->type) {
832 case IND_STATIC:
833 {
834 StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);
835
836 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
837 ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
838 p = *IND_STATIC_LINK((StgClosure *)p);
839 break;
840 }
841
842 case THUNK_STATIC:
843 p = *THUNK_STATIC_LINK((StgClosure *)p);
844 break;
845
846 case FUN_STATIC:
847 p = *FUN_STATIC_LINK((StgClosure *)p);
848 break;
849
850 case CONSTR_STATIC:
851 p = *STATIC_LINK(info,(StgClosure *)p);
852 break;
853
854 default:
855 barf("checkStaticObjetcs: strange closure %p (%s)",
856 p, info_type(p));
857 }
858 }
859 }
860
861 /*
862 Check the sanity of a blocking queue starting at bqe with closure being
863 the closure holding the blocking queue.
864 Note that in GUM we can have several different closure types in a
 865 blocking queue.
866 */
867 #if defined(PAR)
868 void
869 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
870 {
871 rtsBool end = rtsFalse;
872 StgInfoTable *info = get_itbl(closure);
873
874 ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
875
876 do {
877 switch (get_itbl(bqe)->type) {
878 case BLOCKED_FETCH:
879 case TSO:
880 checkClosure((StgClosure *)bqe);
881 bqe = bqe->link;
882 end = (bqe==END_BQ_QUEUE);
883 break;
884
885 case CONSTR:
886 checkClosure((StgClosure *)bqe);
887 end = rtsTrue;
888 break;
889
890 default:
891 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
892 get_itbl(bqe)->type, closure, info_type(closure));
893 }
894 } while (!end);
895 }
896 #elif defined(GRAN)
897 void
898 checkBQ (StgTSO *bqe, StgClosure *closure)
899 {
900 rtsBool end = rtsFalse;
901 StgInfoTable *info = get_itbl(closure);
902
903 ASSERT(info->type == MVAR);
904
905 do {
906 switch (get_itbl(bqe)->type) {
907 case BLOCKED_FETCH:
908 case TSO:
909 checkClosure((StgClosure *)bqe);
910 bqe = bqe->link;
911 end = (bqe==END_BQ_QUEUE);
912 break;
913
914 default:
915 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
916 get_itbl(bqe)->type, closure, info_type(closure));
917 }
918 } while (!end);
919 }
920 #endif
921
922
923
924 /*
925 This routine checks the sanity of the LAGA and GALA tables. They are
926 implemented as lists through one hash table, LAtoGALAtable, because entries
927 in both tables have the same structure:
928 - the LAGA table maps local addresses to global addresses; it starts
929 with liveIndirections
930 - the GALA table maps global addresses to local addresses; it starts
931 with liveRemoteGAs
932 */
933
934 #if defined(PAR)
935 #include "Hash.h"
936
937 /* hidden in parallel/Global.c; only accessed for testing here */
938 extern GALA *liveIndirections;
939 extern GALA *liveRemoteGAs;
940 extern HashTable *LAtoGALAtable;
941
942 void
943 checkLAGAtable(rtsBool check_closures)
944 {
945 GALA *gala, *gala0;
946 nat n=0, m=0; // debugging
947
948 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
949 n++;
950 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
951 ASSERT(!gala->preferred || gala == gala0);
952 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
953 ASSERT(gala->next!=gala); // detect direct loops
954 if ( check_closures ) {
955 checkClosure((StgClosure *)gala->la);
956 }
957 }
958
959 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
960 m++;
961 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
962 ASSERT(!gala->preferred || gala == gala0);
963 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
964 ASSERT(gala->next!=gala); // detect direct loops
965 /*
966 if ( check_closures ) {
967 checkClosure((StgClosure *)gala->la);
968 }
969 */
970 }
971 }
972 #endif
973
974 #endif /* DEBUG */