/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG /* whole file */

#include "RtsFlags.h"
#include "RtsUtils.h"
#include "BlockAlloc.h"
#include "Sanity.h"
#include "MBlock.h"
#include "Storage.h"
#include "Schedule.h"
#include "Apply.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat size );
static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat size );
static void checkClosureShallow ( StgClosure * );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

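/* Walk a small (one-word) bitmap.  In GHC layout bitmaps a 0 bit marks a
 * pointer word and a 1 bit a non-pointer, so only the 0-bit slots are
 * checked.  E.g. with size == 4 and bitmap == 0b0110, words 0 and 3 are
 * checked as closure pointers; words 1 and 2 are skipped. */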
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

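/* As checkSmallBitmap, but the liveness mask spans several StgWords,
 * consumed BITS_IN(W_) bits at a time. */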
static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that a closure looks valid, without checking its payload.
 * Used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }
}

// check an individual stack object
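// Returns the frame's size in words, so the caller can advance to the
// next frame on the stack.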
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p, RET_DYN_LIVENESS(r->liveness), RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
               RET_DYN_NONPTR_REGS_SIZE +
               RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
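        /* fall through: the rest of an update frame's layout is a small
           bitmap, handled like the frames below */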
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
      // small bitmap cases (<= 32 entries)
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(ret_fun->fun);
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",
             c, info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

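/* Check the argument payload of a PAP or AP against the argument bitmap
 * of the function being applied; shared by the AP and PAP cases of
 * checkClosure() below. */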
static void
checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
{
    StgFunInfoTable *fun_info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }
}


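// Check an individual closure and return its size in words.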
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));

    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = get_itbl(p);
    switch (info->type) {

    case MVAR:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
#if 0
#if defined(PAR)
        checkBQ((StgBlockingQueueElement *)mvar->head, p);
#else
        checkBQ(mvar->head, p);
#endif
#endif
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
#ifdef TICKY_TICKY
    case SE_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
#endif
    case BLACKHOLE:
    case CAF_BLACKHOLE:
    case STABLE_NAME:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);

#if defined(PAR)

    case BLOCKED_FETCH:
        ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
        return sizeofW(StgBlockedFetch);  // see size used in evacuate()

#ifdef DIST
    case REMOTE_REF:
        return sizeofW(StgFetchMe);
#endif /* DIST */

    case FETCH_ME:
        ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
        return sizeofW(StgFetchMe);  // see size used in evacuate()

    case FETCH_ME_BQ:
        checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
        return sizeofW(StgFetchMeBlockingQueue);  // see size used in evacuate()

    case RBH:
        /* In an RBH the BQ may be empty (i.e. END_BQ_QUEUE) but not NULL */
        ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
        if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
            checkBQ(((StgRBH *)p)->blocking_queue, p);
        ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
        return BLACKHOLE_sizeW();  // see size used in evacuate()
        // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));

#endif

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
        return sizeofW(StgTVarWatchQueue);
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
        return sizeofW(StgInvariantCheckQueue);
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
        return sizeofW(StgAtomicInvariant);
    }

    case TVAR:
    {
        StgTVar *tv = (StgTVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
        return sizeofW(StgTVar);
    }

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = (StgTRecHeader *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec->enclosing_trec));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec->current_chunk));
        return sizeofW(StgTRecHeader);
    }

    case EVACUATED:
        barf("checkClosure: found EVACUATED closure %d",
             info->type);

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}

#if defined(PAR)

#define PVM_PE_MASK  0xfffc0000
#define MAX_PVM_PES  MAX_PES
#define MAX_PVM_TIDS MAX_PES
#define MAX_SLOTS    100000

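/* Sanity-check a PVM task id: the PE number lives in the bits above
 * bit 18 (PVM_PE_MASK) and the per-PE task number in the bits below;
 * both must be non-zero where required and in range. */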
rtsBool
looks_like_tid(StgInt tid)
{
    StgInt hi = (tid & PVM_PE_MASK) >> 18;
    StgInt lo = (tid & ~PVM_PE_MASK);
    rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
    return ok;
}

rtsBool
looks_like_slot(StgInt slot)
{
    /* if the tid is known, better to use looks_like_ga! */
    rtsBool ok = slot < MAX_SLOTS;
    // This refers only to the no. of slots on the current PE
    // rtsBool ok = slot<=highest_slot();
    return ok;
}

rtsBool
looks_like_ga(globalAddr *ga)
{
    rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
    rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
                      (ga)->payload.gc.slot<=highest_slot() :
                      (ga)->payload.gc.slot<MAX_SLOTS;
    rtsBool ok = is_tid && is_slot;
    return ok;
}

#endif


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            p += size;

            /* skip over slop */
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
        }
    }
}
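
/* A minimal (hypothetical) usage sketch: under DEBUG, hand checkHeap the
 * head of a block chain, such as a step's 'blocks' list after GC:
 *
 *     IF_DEBUG(sanity, checkHeap(stp->blocks));
 *
 * where 'stp' names some step; every closure in every block of the
 * chain is then checked.  (The exact call site is an assumption; the
 * point is only that 'bd' is the head of a linked chain of bdescrs.) */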

#if defined(PAR)
/*
  Check heap between start and end.  Used after unpacking graphs.
*/
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    extern globalAddr *LAGAlookup(StgClosure *addr);
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
        if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
            *(p+1) == 0x0000eeee /* i.e. unpack garbage (see SetGAandCommonUp) */) {
            /* if it's a FM created during unpack and commoned up, it's not global */
            ASSERT(LAGAlookup((StgClosure*)p)==NULL);
            size = sizeofW(StgFetchMe);
        } else if (get_itbl((StgClosure*)p)->type == IND) {
            *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
            size = sizeofW(StgInd);
        } else {
            size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap. */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
        }
    }
}
#else /* !PAR */
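// Non-PAR version: simply check every closure between start and end.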
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}
#endif

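// Check the closure at the start of each block in a chain (e.g. a
// large-object list, where each block holds a single closure).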
void
checkChain(bdescr *bd)
{
    while (bd != NULL) {
        checkClosure((StgClosure *)bd->start);
        bd = bd->link;
    }
}

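// Check a TSO: its stack pointer must lie within its stack, and every
// frame on the stack must be sane.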
void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(stack <= sp && sp < stack_end);

#if defined(PAR)
    ASSERT(tso->par.magic==TSO_MAGIC);

    switch (tso->why_blocked) {
    case BlockedOnGA:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
               get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
        break;
    case BlockedOnGA_NoSend:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
        break;
    case BlockedOnBlackHole:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
               get_itbl(tso->block_info.closure)->type==RBH);
        break;
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        /* isOnBQ(blocked_queue) */
        break;
    case BlockedOnException:
        /* isOnSomeBQ(tso) */
        ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
        break;
    case BlockedOnMVar:
        ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
        break;
    case BlockedOnSTM:
        ASSERT(tso->block_info.closure == END_TSO_QUEUE);
        break;
    default:
        /*
          Could check other values of why_blocked but I am more
          lazy than paranoid (bad combination) -- HWL
        */
        break;
    }

    /* if the link field is non-nil it must point to one of these
       three closure types */
    ASSERT(tso->link == END_TSO_QUEUE ||
           get_itbl(tso->link)->type == TSO ||
           get_itbl(tso->link)->type == BLOCKED_FETCH ||
           get_itbl(tso->link)->type == CONSTR);
#endif

    checkStackChunk(sp, stack_end);
}

#if defined(GRAN)
void
checkTSOsSanity(void) {
    nat i, tsos;
    StgTSO *tso;

    debugBelch("Checking sanity of all runnable TSOs:");

    for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
        for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
            debugBelch("TSO %p on PE %d ...", tso, i);
            checkTSO(tso);
            debugBelch("OK, ");
            tsos++;
        }
    }

    debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
}


// still GRAN only

void
checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
{
    StgTSO *tso, *prev;

    /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
    ASSERT(run_queue_hds[proc]!=NULL);
    ASSERT(run_queue_tls[proc]!=NULL);
    /* if either head or tail is NIL then the other one must be NIL, too */
    ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
    ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
    for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
         tso!=END_TSO_QUEUE;
         prev=tso, tso=tso->link) {
        ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
               (prev==END_TSO_QUEUE || prev->link==tso));
        if (check_TSO_too)
            checkTSO(tso);
    }
    ASSERT(prev==run_queue_tls[proc]);
}

void
checkThreadQsSanity (rtsBool check_TSO_too)
{
    PEs p;

    for (p=0; p<RtsFlags.GranFlags.proc; p++)
        checkThreadQSanity(p, check_TSO_too);
}
#endif /* GRAN */

/*
  Check that all TSOs have been evacuated.
  Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    extern StgTSO *all_threads;
    StgTSO *tso;
    for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
        ASSERT(get_itbl(tso)->type == TSO);
        if (checkTSOs)
            checkTSO(tso);
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

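// Every object recorded on generation 'gen's mutable list must itself
// live in generation 'gen' (static closures are exempt).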
void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
        }
    }
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/*
  Check the sanity of a blocking queue starting at bqe, with closure
  being the closure holding the blocking queue.
  Note that in GUM we can have several different closure types in a
  blocking queue.
*/
#if defined(PAR)
void
checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
{
    rtsBool end = rtsFalse;
    StgInfoTable *info = get_itbl(closure);

    ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);

    do {
        switch (get_itbl(bqe)->type) {
        case BLOCKED_FETCH:
        case TSO:
            checkClosure((StgClosure *)bqe);
            bqe = bqe->link;
            end = (bqe==END_BQ_QUEUE);
            break;

        case CONSTR:
            checkClosure((StgClosure *)bqe);
            end = rtsTrue;
            break;

        default:
            barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
                 get_itbl(bqe)->type, closure, info_type(closure));
        }
    } while (!end);
}
#elif defined(GRAN)
void
checkBQ (StgTSO *bqe, StgClosure *closure)
{
    rtsBool end = rtsFalse;
    StgInfoTable *info = get_itbl(closure);

    ASSERT(info->type == MVAR);

    do {
        switch (get_itbl(bqe)->type) {
        case BLOCKED_FETCH:
        case TSO:
            checkClosure((StgClosure *)bqe);
            bqe = bqe->link;
            end = (bqe==END_BQ_QUEUE);
            break;

        default:
            barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
                 get_itbl(bqe)->type, closure, info_type(closure));
        }
    } while (!end);
}
#endif


/*
  This routine checks the sanity of the LAGA and GALA tables.  They are
  implemented as lists through one hash table, LAtoGALAtable, because entries
  in both tables have the same structure:
   - the LAGA table maps local addresses to global addresses; it starts
     with liveIndirections
   - the GALA table maps global addresses to local addresses; it starts
     with liveRemoteGAs
*/

#if defined(PAR)
#include "Hash.h"

/* hidden in parallel/Global.c; only accessed for testing here */
extern GALA *liveIndirections;
extern GALA *liveRemoteGAs;
extern HashTable *LAtoGALAtable;

void
checkLAGAtable(rtsBool check_closures)
{
    GALA *gala, *gala0;
    nat n=0, m=0; // debugging

    for (gala = liveIndirections; gala != NULL; gala = gala->next) {
        n++;
        gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
        ASSERT(!gala->preferred || gala == gala0);
        ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
        ASSERT(gala->next!=gala); // detect direct loops
        if ( check_closures ) {
            checkClosure((StgClosure *)gala->la);
        }
    }

    for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
        m++;
        gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
        ASSERT(!gala->preferred || gala == gala0);
        ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
        ASSERT(gala->next!=gala); // detect direct loops
        /*
        if ( check_closures ) {
            checkClosure((StgClosure *)gala->la);
        }
        */
    }
}
#endif

#endif /* DEBUG */