/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsFlags.h"
#include "RtsUtils.h"
#include "BlockAlloc.h"
#include "Sanity.h"
#include "MBlock.h"
#include "Storage.h"
#include "Schedule.h"
#include "Apply.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

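/* Note on layout bitmaps, as used by the checks below: a clear bit marks a
 * pointer word and a set bit marks a non-pointer, so we shallow-check
 * exactly those payload words whose bitmap bit is 0.
 */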
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    StgPtr p;
    nat i;

    p = payload;
    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

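/* A large bitmap is an array of StgWords; walk it one word at a time,
 * consuming BITS_IN(W_) payload entries per bitmap word and checking the
 * pointer words exactly as in the small-bitmap case.
 */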
static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that it looks like a valid closure, without checking its payload.
 * Used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// Check an individual stack frame; returns the size of the frame in words.
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
               RET_DYN_NONPTR_REGS_SIZE +
               RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
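        // fall through: the rest of an update frame has small-bitmap
        // layout and is checked together with the cases below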
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

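/* Check the payload of a PAP or AP against the argument bitmap of the
 * function being applied; n_args is the number of payload words to check.
 */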
static void
checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *p;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    p = (StgClosure *)payload;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }
}

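/* Check a single closure (possibly tagged) and return its size in words,
 * so that callers can walk a region of heap one closure at a time.
 */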
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = get_itbl(p);
    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
#if 0
#if defined(PAR)
        checkBQ((StgBlockingQueueElement *)mvar->head, p);
#else
        checkBQ(mvar->head, p);
#endif
#endif
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
#ifdef TICKY_TICKY
    case SE_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
#endif
    case BLACKHOLE:
    case CAF_BLACKHOLE:
    case STABLE_NAME:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);

#if defined(PAR)

    case BLOCKED_FETCH:
        ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
        return sizeofW(StgBlockedFetch);  // see size used in evacuate()

#ifdef DIST
    case REMOTE_REF:
        return sizeofW(StgFetchMe);
#endif /* DIST */

    case FETCH_ME:
        ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
        return sizeofW(StgFetchMe);  // see size used in evacuate()

    case FETCH_ME_BQ:
        checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
        return sizeofW(StgFetchMeBlockingQueue);  // see size used in evacuate()

    case RBH:
        /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
        ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
        if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
            checkBQ(((StgRBH *)p)->blocking_queue, p);
        ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
        return BLACKHOLE_sizeW();  // see size used in evacuate()
        // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));

#endif

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
        return sizeofW(StgTVarWatchQueue);
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
        return sizeofW(StgInvariantCheckQueue);
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
        return sizeofW(StgAtomicInvariant);
    }

    case TVAR:
    {
        StgTVar *tv = (StgTVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
        return sizeofW(StgTVar);
    }

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = (StgTRecHeader *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec->enclosing_trec));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec->current_chunk));
        return sizeofW(StgTRecHeader);
    }

    case EVACUATED:
        barf("checkClosure: found EVACUATED closure %d",
             info->type);

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}

#if defined(PAR)

#define PVM_PE_MASK    0xfffc0000
#define MAX_PVM_PES    MAX_PES
#define MAX_PVM_TIDS   MAX_PES
#define MAX_SLOTS      100000

rtsBool
looks_like_tid(StgInt tid)
{
    StgInt hi = (tid & PVM_PE_MASK) >> 18;
    StgInt lo = (tid & ~PVM_PE_MASK);
    rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
    return ok;
}

rtsBool
looks_like_slot(StgInt slot)
{
    /* If the tid is known, better to use looks_like_ga! */
    rtsBool ok = slot < MAX_SLOTS;
    // This refers only to the no. of slots on the current PE
    // rtsBool ok = slot<=highest_slot();
    return ok;
}

rtsBool
looks_like_ga(globalAddr *ga)
{
    rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
    rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
                      (ga)->payload.gc.slot<=highest_slot() :
                      (ga)->payload.gc.slot<MAX_SLOTS;
    rtsBool ok = is_tid && is_slot;
    return ok;
}

#endif

/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            p += size;

            /* Skip over slop: the update code can leave zeroed words
             * between closures, so advance past anything too small (or
             * otherwise implausible) to be an info pointer.
             */
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
        }
    }
}

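/* Usage sketch (hypothetical; the real drivers live elsewhere in the RTS):
 * with sanity checking enabled one might validate a region like this,
 * where 'blocks' and 'large_objects' are assumed bdescr chains:
 *
 *     checkHeap(blocks);          // every closure between start and free
 *     checkChain(large_objects);  // one closure at the head of each block
 */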
#if defined(PAR)
/*
  Check heap between start and end. Used after unpacking graphs.
*/
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    extern globalAddr *LAGAlookup(StgClosure *addr);
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
        if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
            *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
            /* if it's a FM created during unpack and commoned up, it's not global */
            ASSERT(LAGAlookup((StgClosure*)p)==NULL);
            size = sizeofW(StgFetchMe);
        } else if (get_itbl((StgClosure*)p)->type == IND) {
            *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
            size = sizeofW(StgInd);
        } else {
            size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap. */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
        }
    }
}
#else /* !PAR */
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}
#endif

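/* Check a chain of blocks each of which starts with a single closure,
 * as is the case for (e.g.) a large-object list.
 */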
void
checkChain(bdescr *bd)
{
    while (bd != NULL) {
        checkClosure((StgClosure *)bd->start);
        bd = bd->link;
    }
}

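/* Check a TSO: relocated and killed threads are skipped; otherwise we
 * check that sp lies within the stack and then sanity-check every frame
 * on the stack.
 */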
void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(stack <= sp && sp < stack_end);

#if defined(PAR)
    ASSERT(tso->par.magic==TSO_MAGIC);

    switch (tso->why_blocked) {
    case BlockedOnGA:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
               get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
        break;
    case BlockedOnGA_NoSend:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
        break;
    case BlockedOnBlackHole:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
               get_itbl(tso->block_info.closure)->type==RBH);
        break;
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        /* isOnBQ(blocked_queue) */
        break;
    case BlockedOnException:
        /* isOnSomeBQ(tso) */
        ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
        break;
    case BlockedOnMVar:
        ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
        break;
    case BlockedOnSTM:
        ASSERT(tso->block_info.closure == END_TSO_QUEUE);
        break;
    default:
        /* Could check other values of why_blocked but I am more
           lazy than paranoid (bad combination) -- HWL */
        break;
    }

    /* if the link field is non-nil it must point to one of these
       three closure types */
    ASSERT(tso->link == END_TSO_QUEUE ||
           get_itbl(tso->link)->type == TSO ||
           get_itbl(tso->link)->type == BLOCKED_FETCH ||
           get_itbl(tso->link)->type == CONSTR);
#endif

    checkStackChunk(sp, stack_end);
}

#if defined(GRAN)
void
checkTSOsSanity(void) {
    nat i, tsos;
    StgTSO *tso;

    debugBelch("Checking sanity of all runnable TSOs:");

    for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
        for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
            debugBelch("TSO %p on PE %d ...", tso, i);
            checkTSO(tso);
            debugBelch("OK, ");
            tsos++;
        }
    }

    debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
}


// still GRAN only

rtsBool
checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
{
    StgTSO *tso, *prev;

    /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
    ASSERT(run_queue_hds[proc]!=NULL);
    ASSERT(run_queue_tls[proc]!=NULL);
    /* if either head or tail is NIL then the other one must be NIL, too */
    ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
    ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
    for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
         tso!=END_TSO_QUEUE;
         prev=tso, tso=tso->link) {
        ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
               (prev==END_TSO_QUEUE || prev->link==tso));
        if (check_TSO_too)
            checkTSO(tso);
    }
    ASSERT(prev==run_queue_tls[proc]);
}

rtsBool
checkThreadQsSanity (rtsBool check_TSO_too)
{
    PEs p;

    for (p=0; p<RtsFlags.GranFlags.proc; p++)
        checkThreadQSanity(p, check_TSO_too);
}
#endif /* GRAN */

/*
  Check that all TSOs have been evacuated.
  Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    extern StgTSO *all_threads;
    StgTSO *tso;
    for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
        ASSERT(get_itbl(tso)->type == TSO);
        if (checkTSOs)
            checkTSO(tso);
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

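/* The invariant checked here: every object recorded on the mutable list
 * of generation 'gen' must itself be static or live in generation 'gen'.
 */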
void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
        }
    }
}

/*
  Check the static objects list.
*/
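/* Each static closure type keeps its list link in a different place,
 * hence the per-type *_STATIC_LINK selectors used below.
 */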
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/*
  Check the sanity of a blocking queue starting at bqe, with closure being
  the closure holding the blocking queue.
  Note that in GUM we can have several different closure types in a
  blocking queue.
*/
#if defined(PAR)
void
checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
{
    rtsBool end = rtsFalse;
    StgInfoTable *info = get_itbl(closure);

    ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);

    do {
        switch (get_itbl(bqe)->type) {
        case BLOCKED_FETCH:
        case TSO:
            checkClosure((StgClosure *)bqe);
            bqe = bqe->link;
            end = (bqe==END_BQ_QUEUE);
            break;

        case CONSTR:
            checkClosure((StgClosure *)bqe);
            end = rtsTrue;
            break;

        default:
            barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
                 get_itbl(bqe)->type, closure, info_type(closure));
        }
    } while (!end);
}
#elif defined(GRAN)
void
checkBQ (StgTSO *bqe, StgClosure *closure)
{
    rtsBool end = rtsFalse;
    StgInfoTable *info = get_itbl(closure);

    ASSERT(info->type == MVAR);

    do {
        switch (get_itbl(bqe)->type) {
        case BLOCKED_FETCH:
        case TSO:
            checkClosure((StgClosure *)bqe);
            bqe = bqe->link;
            end = (bqe==END_BQ_QUEUE);
            break;

        default:
            barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
                 get_itbl(bqe)->type, closure, info_type(closure));
        }
    } while (!end);
}
#endif

/*
  This routine checks the sanity of the LAGA and GALA tables. They are
  implemented as lists through one hash table, LAtoGALAtable, because entries
  in both tables have the same structure:
   - the LAGA table maps local addresses to global addresses; it starts
     with liveIndirections
   - the GALA table maps global addresses to local addresses; it starts
     with liveRemoteGAs
*/

#if defined(PAR)
#include "Hash.h"

/* hidden in parallel/Global.c; only accessed for testing here */
extern GALA *liveIndirections;
extern GALA *liveRemoteGAs;
extern HashTable *LAtoGALAtable;

void
checkLAGAtable(rtsBool check_closures)
{
    GALA *gala, *gala0;
    nat n=0, m=0; // debugging

    for (gala = liveIndirections; gala != NULL; gala = gala->next) {
        n++;
        gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
        ASSERT(!gala->preferred || gala == gala0);
        ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
        ASSERT(gala->next!=gala); // detect direct loops
        if ( check_closures ) {
            checkClosure((StgClosure *)gala->la);
        }
    }

    for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
        m++;
        gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
        ASSERT(!gala->preferred || gala == gala0);
        ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
        ASSERT(gala->next!=gala); // detect direct loops
        /*
        if ( check_closures ) {
            checkClosure((StgClosure *)gala->la);
        }
        */
    }
}
#endif

#endif /* DEBUG */