[ghc.git] / rts / Sanity.c
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
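
/* Usage note (illustrative, not authoritative): the checks in this file
 * exist only in a DEBUG build of the RTS.  A program linked against the
 * debug RTS (e.g. built with "ghc -debug") can enable them with the
 * sanity debug flag:
 *
 *     ./MyProgram +RTS -DS          (MyProgram is a placeholder name)
 *
 * which causes checkers in this file, such as checkHeap() and
 * checkTSO(), to be run around garbage collections.
 */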

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsFlags.h"
#include "RtsUtils.h"
#include "BlockAlloc.h"
#include "Sanity.h"
#include "MBlock.h"
#include "Storage.h"
#include "Schedule.h"
#include "Apply.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            /* a 0 bit in the bitmap marks a live pointer word */
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}
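
/* Worked example (illustrative): with size = 4 and bitmap = 0x6
 * (binary 0110), bits are consumed least-significant first, so
 * payload[0] and payload[3] are checked as pointers while payload[1]
 * and payload[2] are skipped as non-pointers.
 */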

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}
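
/* For example (illustrative): on a 32-bit machine, BITS_IN(W_) is 32,
 * so a payload of 40 words uses all of large_bitmap->bitmap[0] for
 * payload[0..31] and the low 8 bits of bitmap[1] for payload[32..39].
 */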

/*
 * Check that a closure looks valid, without checking its payload.
 * Used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}
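
/* Background note (assumed from the pointer-tagging scheme): a closure
 * pointer may carry a tag in its low bits (2 bits on 32-bit machines,
 * 3 on 64-bit), so UNTAG_CLOSURE() must strip the tag before the
 * address is dereferenced or classified as static vs. heap-allocated.
 */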

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
               RET_DYN_NONPTR_REGS_SIZE +
               RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through: the rest of the frame is a small bitmap */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",
             c, info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}
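
/* Within this file, checkStackChunk() is used both by checkTSO() to
 * validate a thread's live stack and by checkClosure() to validate the
 * saved stack fragment carried in an AP_STACK closure.
 */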

static void
checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
{
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }
}
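
/* checkPAP() is shared by the AP and PAP cases of checkClosure() below:
 * both closure kinds lay out their argument payload according to the
 * function's argument bitmap, so one checker covers both.
 */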

StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = get_itbl(p);
    switch (info->type) {

    case MVAR:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
#if 0
#if defined(PAR)
        checkBQ((StgBlockingQueueElement *)mvar->head, p);
#else
        checkBQ(mvar->head, p);
#endif
#endif
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
#ifdef TICKY_TICKY
    case SE_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
#endif
    case BLACKHOLE:
    case CAF_BLACKHOLE:
    case STABLE_NAME:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);

#if defined(PAR)

    case BLOCKED_FETCH:
        ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
        return sizeofW(StgBlockedFetch);  // see size used in evacuate()

#ifdef DIST
    case REMOTE_REF:
        return sizeofW(StgFetchMe);
#endif /* DIST */

    case FETCH_ME:
        ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
        return sizeofW(StgFetchMe);  // see size used in evacuate()

    case FETCH_ME_BQ:
        checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
        return sizeofW(StgFetchMeBlockingQueue);  // see size used in evacuate()

    case RBH:
        /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
        ASSERT(((StgRBH *)p)->blocking_queue != NULL);
        if (((StgRBH *)p)->blocking_queue != END_BQ_QUEUE)
            checkBQ(((StgRBH *)p)->blocking_queue, p);
        ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
        return BLACKHOLE_sizeW();  // see size used in evacuate()
        // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));

#endif

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
        return sizeofW(StgTVarWatchQueue);
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
        return sizeofW(StgInvariantCheckQueue);
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
        return sizeofW(StgAtomicInvariant);
    }

    case TVAR:
    {
        StgTVar *tv = (StgTVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
        return sizeofW(StgTVar);
    }

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = (StgTRecHeader *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec->enclosing_trec));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec->current_chunk));
        return sizeofW(StgTRecHeader);
    }

    case EVACUATED:
        barf("checkClosure: found EVACUATED closure %d",
             info->type);

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}
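
/* Note that checkClosure() returns the closure's size in words; this is
 * what lets checkHeap() and checkHeapChunk() below walk a region of
 * heap linearly, advancing by the size of each closure they check.
 */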

#if defined(PAR)

#define PVM_PE_MASK   0xfffc0000
#define MAX_PVM_PES   MAX_PES
#define MAX_PVM_TIDS  MAX_PES
#define MAX_SLOTS     100000

rtsBool
looks_like_tid(StgInt tid)
{
    StgInt hi = (tid & PVM_PE_MASK) >> 18;
    StgInt lo = (tid & ~PVM_PE_MASK);
    rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
    return ok;
}
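
/* Worked example (illustrative): a PVM task id packs the PE number in
 * bits 18 and up, so tid = (5 << 18) | 3 gives hi = 5 and lo = 3, which
 * looks plausible as long as both are below MAX_PVM_TIDS.
 */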

rtsBool
looks_like_slot(StgInt slot)
{
    /* if the tid is known, better to use looks_like_ga!! */
    rtsBool ok = slot < MAX_SLOTS;
    // This refers only to the no. of slots on the current PE
    // rtsBool ok = slot <= highest_slot();
    return ok;
}

rtsBool
looks_like_ga(globalAddr *ga)
{
    rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
    rtsBool is_slot = ((ga)->payload.gc.gtid == mytid) ?
                          (ga)->payload.gc.slot <= highest_slot() :
                          (ga)->payload.gc.slot < MAX_SLOTS;
    rtsBool ok = is_tid && is_slot;
    return ok;
}

#endif

/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            p += size;

            /* skip over slop */
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
        }
    }
}
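
/* The slop-skipping loop above relies on the fact that, in a
 * non-threaded DEBUG RTS, the words left over when a closure is
 * overwritten by a smaller one are zeroed (see Updates.h): a zero word
 * is well below 0x1000 and cannot look like an info pointer, so it is
 * safely skipped.
 */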

#if defined(PAR)
/*
  Check heap between start and end.  Used after unpacking graphs.
*/
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    extern globalAddr *LAGAlookup(StgClosure *addr);
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
        if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
            *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
            /* if it's a FM created during unpack and commoned up, it's not global */
            ASSERT(LAGAlookup((StgClosure*)p) == NULL);
            size = sizeofW(StgFetchMe);
        } else if (get_itbl((StgClosure*)p)->type == IND) {
            *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
            size = sizeofW(StgInd);
        } else {
            size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap. */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
        }
    }
}
#else /* !PAR */
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}
#endif

void
checkChain(bdescr *bd)
{
    while (bd != NULL) {
        checkClosure((StgClosure *)bd->start);
        bd = bd->link;
    }
}
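
/* Note (a reading of the code, not stated elsewhere in this file):
 * checkChain() only checks the closure at bd->start of each block,
 * which matches how large objects are kept -- one object per link in
 * the block chain.
 */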

void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(stack <= sp && sp < stack_end);

#if defined(PAR)
    ASSERT(tso->par.magic == TSO_MAGIC);

    switch (tso->why_blocked) {
    case BlockedOnGA:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
               get_itbl(tso->block_info.closure)->type == FETCH_ME_BQ);
        break;
    case BlockedOnGA_NoSend:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(get_itbl(tso->block_info.closure)->type == FETCH_ME_BQ);
        break;
    case BlockedOnBlackHole:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE ||
               get_itbl(tso->block_info.closure)->type == RBH);
        break;
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        /* isOnBQ(blocked_queue) */
        break;
    case BlockedOnException:
        /* isOnSomeBQ(tso) */
        ASSERT(get_itbl(tso->block_info.tso)->type == TSO);
        break;
    case BlockedOnMVar:
        ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
        break;
    case BlockedOnSTM:
        ASSERT(tso->block_info.closure == END_TSO_QUEUE);
        break;
    default:
        /*
          Could check other values of why_blocked but I am more
          lazy than paranoid (bad combination) -- HWL
        */
        break;
    }

    /* if the link field is non-nil it must point to one of these
       three closure types */
    ASSERT(tso->link == END_TSO_QUEUE ||
           get_itbl(tso->link)->type == TSO ||
           get_itbl(tso->link)->type == BLOCKED_FETCH ||
           get_itbl(tso->link)->type == CONSTR);
#endif

    checkStackChunk(sp, stack_end);
}

#if defined(GRAN)
void
checkTSOsSanity(void) {
    nat i, tsos;
    StgTSO *tso;

    debugBelch("Checking sanity of all runnable TSOs:");

    for (i = 0, tsos = 0; i < RtsFlags.GranFlags.proc; i++) {
        for (tso = run_queue_hds[i]; tso != END_TSO_QUEUE; tso = tso->link) {
            debugBelch("TSO %p on PE %d ...", tso, i);
            checkTSO(tso);
            debugBelch("OK, ");
            tsos++;
        }
    }

    debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
}


// still GRAN only

rtsBool
checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
{
    StgTSO *tso, *prev;

    /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
    ASSERT(run_queue_hds[proc] != NULL);
    ASSERT(run_queue_tls[proc] != NULL);
    /* if either head or tail is NIL then the other one must be NIL, too */
    ASSERT(run_queue_hds[proc] != END_TSO_QUEUE || run_queue_tls[proc] == END_TSO_QUEUE);
    ASSERT(run_queue_tls[proc] != END_TSO_QUEUE || run_queue_hds[proc] == END_TSO_QUEUE);
    for (tso = run_queue_hds[proc], prev = END_TSO_QUEUE;
         tso != END_TSO_QUEUE;
         prev = tso, tso = tso->link) {
        ASSERT((prev != END_TSO_QUEUE || tso == run_queue_hds[proc]) &&
               (prev == END_TSO_QUEUE || prev->link == tso));
        if (check_TSO_too)
            checkTSO(tso);
    }
    ASSERT(prev == run_queue_tls[proc]);
    return rtsTrue;
}

rtsBool
checkThreadQsSanity (rtsBool check_TSO_too)
{
    PEs p;

    for (p = 0; p < RtsFlags.GranFlags.proc; p++)
        checkThreadQSanity(p, check_TSO_too);
    return rtsTrue;
}
#endif /* GRAN */

/*
  Check that all TSOs have been evacuated.
  Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    extern StgTSO *all_threads;
    StgTSO *tso;
    for (tso = all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
        ASSERT(get_itbl(tso)->type == TSO);
        if (checkTSOs)
            checkTSO(tso);
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
        }
    }
}
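
/* Background (a reading of the check above): generation gen's mutable
 * list records objects in that generation which may point at younger
 * generations, so every heap-allocated entry must itself live in
 * generation gen -- hence the assertion on Bdescr(p)->gen_no.
 */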

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/*
  Check the sanity of a blocking queue starting at bqe, with closure being
  the closure holding the blocking queue.
  Note that in GUM we can have several different closure types in a
  blocking queue.
*/
#if defined(PAR)
void
checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
{
    rtsBool end = rtsFalse;
    StgInfoTable *info = get_itbl(closure);

    ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);

    do {
        switch (get_itbl(bqe)->type) {
        case BLOCKED_FETCH:
        case TSO:
            checkClosure((StgClosure *)bqe);
            bqe = bqe->link;
            end = (bqe == END_BQ_QUEUE);
            break;

        case CONSTR:
            checkClosure((StgClosure *)bqe);
            end = rtsTrue;
            break;

        default:
            barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
                 get_itbl(bqe)->type, closure, info_type(closure));
        }
    } while (!end);
}
#elif defined(GRAN)
void
checkBQ (StgTSO *bqe, StgClosure *closure)
{
    rtsBool end = rtsFalse;
    StgInfoTable *info = get_itbl(closure);

    ASSERT(info->type == MVAR);

    do {
        switch (get_itbl(bqe)->type) {
        case BLOCKED_FETCH:
        case TSO:
            checkClosure((StgClosure *)bqe);
            bqe = bqe->link;
            end = (bqe == END_BQ_QUEUE);
            break;

        default:
            barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
                 get_itbl(bqe)->type, closure, info_type(closure));
        }
    } while (!end);
}
#endif


/*
  This routine checks the sanity of the LAGA and GALA tables.  They are
  implemented as lists through one hash table, LAtoGALAtable, because entries
  in both tables have the same structure:
   - the LAGA table maps local addresses to global addresses; it starts
     with liveIndirections
   - the GALA table maps global addresses to local addresses; it starts
     with liveRemoteGAs
*/

#if defined(PAR)
#include "Hash.h"

/* hidden in parallel/Global.c; only accessed for testing here */
extern GALA *liveIndirections;
extern GALA *liveRemoteGAs;
extern HashTable *LAtoGALAtable;

void
checkLAGAtable(rtsBool check_closures)
{
    GALA *gala, *gala0;
    nat n = 0, m = 0; // debugging

    for (gala = liveIndirections; gala != NULL; gala = gala->next) {
        n++;
        gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
        ASSERT(!gala->preferred || gala == gala0);
        ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
        ASSERT(gala->next != gala); // detect direct loops
        if (check_closures) {
            checkClosure((StgClosure *)gala->la);
        }
    }

    for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
        m++;
        gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
        ASSERT(!gala->preferred || gala == gala0);
        ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
        ASSERT(gala->next != gala); // detect direct loops
        /*
        if (check_closures) {
            checkClosure((StgClosure *)gala->la);
        }
        */
    }
}
#endif

#endif /* DEBUG */