rts: More const correct-ness fixes
[ghc.git] / rts / sm / Sanity.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2006
4 *
5 * Sanity checking code for the heap and stack.
6 *
7  * Used when debugging: check that everything is reasonable.
8 *
9 * - All things that are supposed to be pointers look like pointers.
10 *
11  *    - Objects in text space are marked as static closures; those
12 * in the heap are dynamic.
13 *
14 * ---------------------------------------------------------------------------*/
15
16 #include "PosixSource.h"
17 #include "Rts.h"
18
19 #ifdef DEBUG /* whole file */
20
21 #include "RtsUtils.h"
22 #include "sm/Storage.h"
23 #include "sm/BlockAlloc.h"
24 #include "GCThread.h"
25 #include "Sanity.h"
26 #include "Schedule.h"
27 #include "Apply.h"
28 #include "Printer.h"
29 #include "Arena.h"
30 #include "RetainerProfile.h"
31
32 /* -----------------------------------------------------------------------------
33 Forward decls.
34 -------------------------------------------------------------------------- */
35
36 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, uint32_t );
37 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, uint32_t );
38 static void checkClosureShallow ( const StgClosure * );
39 static void checkSTACK (StgStack *stack);
40
41 /* -----------------------------------------------------------------------------
42 Check stack sanity
43 -------------------------------------------------------------------------- */
44
45 static void
46 checkSmallBitmap( StgPtr payload, StgWord bitmap, uint32_t size )
47 {
48 uint32_t i;
49
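    // A clear bit in the bitmap denotes a pointer word, which we check;
    // a set bit denotes a non-pointer word, which is skipped.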
50 for(i = 0; i < size; i++, bitmap >>= 1 ) {
51 if ((bitmap & 1) == 0) {
52 checkClosureShallow((StgClosure *)payload[i]);
53 }
54 }
55 }
56
57 static void
58 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, uint32_t size )
59 {
60 StgWord bmp;
61 uint32_t i, j;
62
63 i = 0;
64 for (bmp=0; i < size; bmp++) {
65 StgWord bitmap = large_bitmap->bitmap[bmp];
66 j = 0;
67 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
68 if ((bitmap & 1) == 0) {
69 checkClosureShallow((StgClosure *)payload[i]);
70 }
71 }
72 }
73 }
74
75 /*
76  * Check that it looks like a valid closure, without checking its payload.
77  * Used to avoid recursion between checking PAPs and checking stack
78 * chunks.
79 */
80
81 static void
82 checkClosureShallow( const StgClosure* p )
83 {
84 const StgClosure *q;
85
86 q = UNTAG_CONST_CLOSURE(p);
87 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
88
89 /* Is it a static closure? */
90 if (!HEAP_ALLOCED(q)) {
91 ASSERT(closure_STATIC(q));
92 } else {
93 ASSERT(!closure_STATIC(q));
94 }
95 }
96
97 // check an individual stack frame, returning its size in words so the
   // caller can advance to the next frame
98 StgOffset
99 checkStackFrame( StgPtr c )
100 {
101 uint32_t size;
102 const StgRetInfoTable* info;
103
104 info = get_ret_itbl((StgClosure *)c);
105
106 /* All activation records have 'bitmap' style layout info. */
107 switch (info->i.type) {
108
109 case UPDATE_FRAME:
110 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
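        // fall through to the small-bitmap cases below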
111 case ATOMICALLY_FRAME:
112 case CATCH_RETRY_FRAME:
113 case CATCH_STM_FRAME:
114 case CATCH_FRAME:
115         // small bitmap cases (bitmap fits in a single word)
116 case UNDERFLOW_FRAME:
117 case STOP_FRAME:
118 case RET_SMALL:
119 size = BITMAP_SIZE(info->i.layout.bitmap);
120 checkSmallBitmap((StgPtr)c + 1,
121 BITMAP_BITS(info->i.layout.bitmap), size);
122 return 1 + size;
123
124 case RET_BCO: {
125 StgBCO *bco;
126 uint32_t size;
127 bco = (StgBCO *)*(c+1);
128 size = BCO_BITMAP_SIZE(bco);
129 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
130 return 2 + size;
131 }
132
133     case RET_BIG: // large bitmap (does not fit in a single word)
134 size = GET_LARGE_BITMAP(&info->i)->size;
135 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
136 return 1 + size;
137
138 case RET_FUN:
139 {
140 const StgFunInfoTable *fun_info;
141 StgRetFun *ret_fun;
142
143 ret_fun = (StgRetFun *)c;
144 fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(ret_fun->fun));
145 size = ret_fun->size;
146 switch (fun_info->f.fun_type) {
147 case ARG_GEN:
148 checkSmallBitmap((StgPtr)ret_fun->payload,
149 BITMAP_BITS(fun_info->f.b.bitmap), size);
150 break;
151 case ARG_GEN_BIG:
152 checkLargeBitmap((StgPtr)ret_fun->payload,
153 GET_FUN_LARGE_BITMAP(fun_info), size);
154 break;
155 default:
156 checkSmallBitmap((StgPtr)ret_fun->payload,
157 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
158 size);
159 break;
160 }
161 return sizeofW(StgRetFun) + size;
162 }
163
164 default:
165 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
166 }
167 }
168
169 // check all the frames in a section of stack [sp, stack_end)
170 void
171 checkStackChunk( StgPtr sp, StgPtr stack_end )
172 {
173 StgPtr p;
174
175 p = sp;
176 while (p < stack_end) {
177 p += checkStackFrame( p );
178 }
179 // ASSERT( p == stack_end ); -- HWL
180 }
181
182 static void
183 checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
184 {
185 const StgClosure *fun;
186 const StgFunInfoTable *fun_info;
187
188 fun = UNTAG_CONST_CLOSURE(tagged_fun);
189 ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
190 fun_info = get_fun_itbl(fun);
191
192 switch (fun_info->f.fun_type) {
193 case ARG_GEN:
194 checkSmallBitmap( (StgPtr)payload,
195 BITMAP_BITS(fun_info->f.b.bitmap), n_args );
196 break;
197 case ARG_GEN_BIG:
198 checkLargeBitmap( (StgPtr)payload,
199 GET_FUN_LARGE_BITMAP(fun_info),
200 n_args );
201 break;
202 case ARG_BCO:
203 checkLargeBitmap( (StgPtr)payload,
204 BCO_BITMAP(fun),
205 n_args );
206 break;
207 default:
208 checkSmallBitmap( (StgPtr)payload,
209 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
210 n_args );
211 break;
212 }
213
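    // Pointer tagging: a function closure's tag encodes its arity when the
    // arity fits in the tag bits; functions with larger arities are left
    // untagged, which is what the assertion below checks.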
214 ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
215 : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
216 }
217
218
219 StgOffset
220 checkClosure( const StgClosure* p )
221 {
222 const StgInfoTable *info;
223
224 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
225
226 p = UNTAG_CONST_CLOSURE(p);
227 /* Is it a static closure (i.e. in the data segment)? */
228 if (!HEAP_ALLOCED(p)) {
229 ASSERT(closure_STATIC(p));
230 } else {
231 ASSERT(!closure_STATIC(p));
232 }
233
234 info = p->header.info;
235
236 if (IS_FORWARDING_PTR(info)) {
237 barf("checkClosure: found EVACUATED closure %d", info->type);
238 }
239 info = INFO_PTR_TO_STRUCT(info);
240
241 switch (info->type) {
242
243 case MVAR_CLEAN:
244 case MVAR_DIRTY:
245 {
246 StgMVar *mvar = (StgMVar *)p;
247 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
248 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
249 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
250 return sizeofW(StgMVar);
251 }
252
253 case THUNK:
254 case THUNK_1_0:
255 case THUNK_0_1:
256 case THUNK_1_1:
257 case THUNK_0_2:
258 case THUNK_2_0:
259 {
260 uint32_t i;
261 for (i = 0; i < info->layout.payload.ptrs; i++) {
262 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
263 }
264 return thunk_sizeW_fromITBL(info);
265 }
266
267 case FUN:
268 case FUN_1_0:
269 case FUN_0_1:
270 case FUN_1_1:
271 case FUN_0_2:
272 case FUN_2_0:
273 case CONSTR:
274 case CONSTR_1_0:
275 case CONSTR_0_1:
276 case CONSTR_1_1:
277 case CONSTR_0_2:
278 case CONSTR_2_0:
279 case BLACKHOLE:
280 case PRIM:
281 case MUT_PRIM:
282 case MUT_VAR_CLEAN:
283 case MUT_VAR_DIRTY:
284 case TVAR:
285 case CONSTR_STATIC:
286 case CONSTR_NOCAF_STATIC:
287 case THUNK_STATIC:
288 case FUN_STATIC:
289 {
290 uint32_t i;
291 for (i = 0; i < info->layout.payload.ptrs; i++) {
292 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
293 }
294 return sizeW_fromITBL(info);
295 }
296
297 case BLOCKING_QUEUE:
298 {
299 StgBlockingQueue *bq = (StgBlockingQueue *)p;
300
301 // NO: the BH might have been updated now
302 // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
303 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));
304
305 ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
306 ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
307 || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
308 ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
309 get_itbl((StgClosure *)(bq->link))->type == IND ||
310 get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);
311
312 return sizeofW(StgBlockingQueue);
313 }
314
315 case BCO: {
316 StgBCO *bco = (StgBCO *)p;
317 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
318 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
319 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
320 return bco_sizeW(bco);
321 }
322
323 case IND_STATIC: /* (1, 0) closure */
324 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
325 return sizeW_fromITBL(info);
326
327 case WEAK:
328 /* deal with these specially - the info table isn't
329 * representative of the actual layout.
330 */
331 { StgWeak *w = (StgWeak *)p;
332 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
333 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
334 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
335 if (w->link) {
336 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
337 }
338 return sizeW_fromITBL(info);
339 }
340
341 case THUNK_SELECTOR:
342 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
343 return THUNK_SELECTOR_sizeW();
344
345 case IND:
346 {
347 /* we don't expect to see any of these after GC
348 * but they might appear during execution
349 */
350 StgInd *ind = (StgInd *)p;
351 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
352 return sizeofW(StgInd);
353 }
354
355 case RET_BCO:
356 case RET_SMALL:
357 case RET_BIG:
358 case UPDATE_FRAME:
359 case UNDERFLOW_FRAME:
360 case STOP_FRAME:
361 case CATCH_FRAME:
362 case ATOMICALLY_FRAME:
363 case CATCH_RETRY_FRAME:
364 case CATCH_STM_FRAME:
365 barf("checkClosure: stack frame");
366
367 case AP:
368 {
369 StgAP* ap = (StgAP *)p;
370 checkPAP (ap->fun, ap->payload, ap->n_args);
371 return ap_sizeW(ap);
372 }
373
374 case PAP:
375 {
376 StgPAP* pap = (StgPAP *)p;
377 checkPAP (pap->fun, pap->payload, pap->n_args);
378 return pap_sizeW(pap);
379 }
380
381 case AP_STACK:
382 {
383 StgAP_STACK *ap = (StgAP_STACK *)p;
384 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
385 checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
386 return ap_stack_sizeW(ap);
387 }
388
389 case ARR_WORDS:
390 return arr_words_sizeW((StgArrBytes *)p);
391
392 case MUT_ARR_PTRS_CLEAN:
393 case MUT_ARR_PTRS_DIRTY:
394 case MUT_ARR_PTRS_FROZEN:
395 case MUT_ARR_PTRS_FROZEN0:
396 {
397 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
398 uint32_t i;
399 for (i = 0; i < a->ptrs; i++) {
400 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
401 }
402 return mut_arr_ptrs_sizeW(a);
403 }
404
405 case TSO:
406 checkTSO((StgTSO *)p);
407 return sizeofW(StgTSO);
408
409 case STACK:
410 checkSTACK((StgStack*)p);
411 return stack_sizeW((StgStack*)p);
412
413 case TREC_CHUNK:
414 {
415 uint32_t i;
416 StgTRecChunk *tc = (StgTRecChunk *)p;
417 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
418 for (i = 0; i < tc -> next_entry_idx; i ++) {
419 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
420 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
421 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
422 }
423 return sizeofW(StgTRecChunk);
424 }
425
426 default:
427 barf("checkClosure (closure type %d)", info->type);
428 }
429 }
430
431
432 /* -----------------------------------------------------------------------------
433 Check Heap Sanity
434
435 After garbage collection, the live heap is in a state where we can
436 run through and check that all the pointers point to the right
437 place. This function starts at a given position and sanity-checks
438 all the objects in the remainder of the chain.
439 -------------------------------------------------------------------------- */
440
441 void checkHeapChain (bdescr *bd)
442 {
443 StgPtr p;
444
445 for (; bd != NULL; bd = bd->link) {
446 if(!(bd->flags & BF_SWEPT)) {
447 p = bd->start;
448 while (p < bd->free) {
449 uint32_t size = checkClosure((StgClosure *)p);
450 /* This is the smallest size of closure that can live in the heap */
451 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
452 p += size;
453
454                 /* skip over slop (the DEBUG RTS zeroes it; see Updates.h) */
455 while (p < bd->free &&
456 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
457 }
458 }
459 }
460 }
461
462 void
463 checkHeapChunk(StgPtr start, StgPtr end)
464 {
465 StgPtr p;
466 uint32_t size;
467
468 for (p=start; p<end; p+=size) {
469 ASSERT(LOOKS_LIKE_INFO_PTR(*p));
470 size = checkClosure((StgClosure *)p);
471 /* This is the smallest size of closure that can live in the heap. */
472 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
473 }
474 }
475
476 void
477 checkLargeObjects(bdescr *bd)
478 {
479 while (bd != NULL) {
480 if (!(bd->flags & BF_PINNED)) {
481 checkClosure((StgClosure *)bd->start);
482 }
483 bd = bd->link;
484 }
485 }
486
487 static void
488 checkSTACK (StgStack *stack)
489 {
490 StgPtr sp = stack->sp;
491 StgOffset stack_size = stack->stack_size;
492 StgPtr stack_end = stack->stack + stack_size;
493
494 ASSERT(stack->stack <= sp && sp <= stack_end);
495
496 checkStackChunk(sp, stack_end);
497 }
498
499 void
500 checkTSO(StgTSO *tso)
501 {
502 StgTSO *next;
503 const StgInfoTable *info;
504
505 if (tso->what_next == ThreadKilled) {
506 /* The garbage collector doesn't bother following any pointers
507 * from dead threads, so don't check sanity here.
508 */
509 return;
510 }
511
512 next = tso->_link;
513 info = (const StgInfoTable*) tso->_link->header.info;
514
515 ASSERT(next == END_TSO_QUEUE ||
516 info == &stg_MVAR_TSO_QUEUE_info ||
517 info == &stg_TSO_info ||
518 info == &stg_WHITEHOLE_info); // happens due to STM doing lockTSO()
519
520 if ( tso->why_blocked == BlockedOnMVar
521 || tso->why_blocked == BlockedOnMVarRead
522 || tso->why_blocked == BlockedOnBlackHole
523 || tso->why_blocked == BlockedOnMsgThrowTo
524 || tso->why_blocked == NotBlocked
525 ) {
526 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
527 }
528
529 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
530 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
531 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));
532
533 // XXX are we checking the stack twice?
534 checkSTACK(tso->stackobj);
535 }
536
537 /*
538 Check that all TSOs have been evacuated.
539 Optionally also check the sanity of the TSOs.
540 */
541 void
542 checkGlobalTSOList (rtsBool checkTSOs)
543 {
544 StgTSO *tso;
545 uint32_t g;
546
547 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
548 for (tso=generations[g].threads; tso != END_TSO_QUEUE;
549 tso = tso->global_link) {
550 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
551 ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
552 if (checkTSOs)
553 checkTSO(tso);
554
555 // If this TSO is dirty and in an old generation, it better
556 // be on the mutable list.
557 if (tso->dirty) {
558 ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
559 tso->flags &= ~TSO_MARKED;
560 }
561
562 {
563 StgStack *stack;
564 StgUnderflowFrame *frame;
565
566 stack = tso->stackobj;
567 while (1) {
568 if (stack->dirty & 1) {
569 ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
570 stack->dirty &= ~TSO_MARKED;
571 }
572 frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
573 - sizeofW(StgUnderflowFrame));
574 if (frame->info != &stg_stack_underflow_frame_info
575 || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
576 stack = frame->next_chunk;
577 }
578 }
579 }
580 }
581 }
582
583 /* -----------------------------------------------------------------------------
584 Check mutable list sanity.
585 -------------------------------------------------------------------------- */
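/*
 * Cross-check with checkGlobalTSOList() above: checkMutableList() marks each
 * TSO and STACK it finds on a mutable list with TSO_MARKED, and
 * checkGlobalTSOList() (called afterwards from checkSanity()) asserts that
 * every dirty TSO or stack in an old generation carries that mark, then
 * clears it again.
 */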
586
587 static void
588 checkMutableList( bdescr *mut_bd, uint32_t gen )
589 {
590 bdescr *bd;
591 StgPtr q;
592 StgClosure *p;
593
594 for (bd = mut_bd; bd != NULL; bd = bd->link) {
595 for (q = bd->start; q < bd->free; q++) {
596 p = (StgClosure *)*q;
597 ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
598 checkClosure(p);
599
600 switch (get_itbl(p)->type) {
601 case TSO:
602 ((StgTSO *)p)->flags |= TSO_MARKED;
603 break;
604 case STACK:
605 ((StgStack *)p)->dirty |= TSO_MARKED;
606 break;
607 }
608 }
609 }
610 }
611
612 static void
613 checkLocalMutableLists (uint32_t cap_no)
614 {
615 uint32_t g;
616 for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
617 checkMutableList(capabilities[cap_no]->mut_lists[g], g);
618 }
619 }
620
621 static void
622 checkMutableLists (void)
623 {
624 uint32_t i;
625 for (i = 0; i < n_capabilities; i++) {
626 checkLocalMutableLists(i);
627 }
628 }
629
630 /*
631 Check the static objects list.
632 */
633 void
634 checkStaticObjects ( StgClosure* static_objects )
635 {
636 StgClosure *p = static_objects;
637 const StgInfoTable *info;
638
639 while (p != END_OF_STATIC_OBJECT_LIST) {
640 p = UNTAG_STATIC_LIST_PTR(p);
641 checkClosure(p);
642 info = get_itbl(p);
643 switch (info->type) {
644 case IND_STATIC:
645 {
646 const StgClosure *indirectee;
647
648 indirectee = UNTAG_CONST_CLOSURE(((StgIndStatic *)p)->indirectee);
649 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
650 ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
651 p = *IND_STATIC_LINK((StgClosure *)p);
652 break;
653 }
654
655 case THUNK_STATIC:
656 p = *THUNK_STATIC_LINK((StgClosure *)p);
657 break;
658
659 case FUN_STATIC:
660 p = *FUN_STATIC_LINK((StgClosure *)p);
661 break;
662
663 case CONSTR_STATIC:
664 p = *STATIC_LINK(info,(StgClosure *)p);
665 break;
666
667 default:
668       barf("checkStaticObjects: strange closure %p (%s)",
669 p, info_type(p));
670 }
671 }
672 }
673
674 /* Nursery sanity check */
675 void
676 checkNurserySanity (nursery *nursery)
677 {
678 bdescr *bd, *prev;
679 uint32_t blocks = 0;
680
681 prev = NULL;
682 for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
683 ASSERT(bd->gen == g0);
684 ASSERT(bd->u.back == prev);
685 prev = bd;
686 blocks += bd->blocks;
687 }
688
689 ASSERT(blocks == nursery->n_blocks);
690 }
691
692 static void checkGeneration (generation *gen,
693 rtsBool after_major_gc USED_IF_THREADS)
694 {
695 uint32_t n;
696 gen_workspace *ws;
697
698 ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
699 ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
700
701 #if defined(THREADED_RTS)
702 // heap sanity checking doesn't work with SMP, because we can't
703 // zero the slop (see Updates.h). However, we can sanity-check
704 // the heap after a major gc, because there is no slop.
705 if (!after_major_gc) return;
706 #endif
707
708 checkHeapChain(gen->blocks);
709
710 for (n = 0; n < n_capabilities; n++) {
711 ws = &gc_threads[n]->gens[gen->no];
712 checkHeapChain(ws->todo_bd);
713 checkHeapChain(ws->part_list);
714 checkHeapChain(ws->scavd_list);
715 }
716
717 checkLargeObjects(gen->large_objects);
718 }
719
720 /* Full heap sanity check. */
721 static void checkFullHeap (rtsBool after_major_gc)
722 {
723 uint32_t g, n;
724
725 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
726 checkGeneration(&generations[g], after_major_gc);
727 }
728 for (n = 0; n < n_capabilities; n++) {
729 checkNurserySanity(&nurseries[n]);
730 }
731 }
732
733 void checkSanity (rtsBool after_gc, rtsBool major_gc)
734 {
735 checkFullHeap(after_gc && major_gc);
736
737 checkFreeListSanity();
738
739     // always check the stacks in threaded mode, because checkGeneration()
740     // skips the heap checks there (except after a major GC).
741 if (after_gc) {
742 checkMutableLists();
743 checkGlobalTSOList(rtsTrue);
744 }
745 }
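/*
 * Usage sketch (illustrative, not part of this file): in a DEBUG-built RTS
 * these checks are driven by the sanity debug flag (+RTS -DS), and the
 * garbage collector invokes them roughly as
 *
 *   IF_DEBUG(sanity, checkSanity(rtsTrue, major_gc));
 *
 * so the full-heap traversal only happens when explicitly requested.
 */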
746
747 // If memInventory() calculates that we have a memory leak, this
748 // function will try to find the block(s) that are leaking by marking
749 // all the ones that we know about, and searching through memory to find
750 // blocks that are not marked. In the debugger this can help to give
751 // us a clue about what kind of block leaked. In the future we might
752 // annotate blocks with their allocation site to give more helpful
753 // info.
754 static void
755 findMemoryLeak (void)
756 {
757 uint32_t g, i;
758 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
759 for (i = 0; i < n_capabilities; i++) {
760 markBlocks(capabilities[i]->mut_lists[g]);
761 markBlocks(gc_threads[i]->gens[g].part_list);
762 markBlocks(gc_threads[i]->gens[g].scavd_list);
763 markBlocks(gc_threads[i]->gens[g].todo_bd);
764 }
765 markBlocks(generations[g].blocks);
766 markBlocks(generations[g].large_objects);
767 }
768
769 for (i = 0; i < n_nurseries; i++) {
770 markBlocks(nurseries[i].blocks);
771 }
772
773 for (i = 0; i < n_capabilities; i++) {
774 markBlocks(gc_threads[i]->free_blocks);
775 markBlocks(capabilities[i]->pinned_object_block);
776 }
777
778 #ifdef PROFILING
779 // TODO:
780 // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
781 // markRetainerBlocks();
782 // }
783 #endif
784
785 // count the blocks allocated by the arena allocator
786 // TODO:
787 // markArenaBlocks();
788
789 // count the blocks containing executable memory
790 markBlocks(exec_block);
791
792 reportUnmarkedBlocks();
793 }
794
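// Check that a capability's run queue is a well-formed doubly-linked list:
// _link points forwards, block_info.prev points backwards, and
// run_queue_tl points at the last element.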
795 void
796 checkRunQueue(Capability *cap)
797 {
798 StgTSO *prev, *tso;
799 prev = END_TSO_QUEUE;
800 for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
801 prev = tso, tso = tso->_link) {
802 ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
803 ASSERT(tso->block_info.prev == prev);
804 }
805 ASSERT(cap->run_queue_tl == prev);
806 }
807
808 /* -----------------------------------------------------------------------------
809 Memory leak detection
810
811 memInventory() checks for memory leaks by counting up all the
812 blocks we know about and comparing that to the number of blocks
813 allegedly floating around in the system.
814 -------------------------------------------------------------------------- */
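/*
 * In words, the invariant checked below is
 *
 *   live_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK
 *
 * i.e. every block of every allocated megablock must be accounted for,
 * either by one of the owners counted here or by the block allocator's
 * free list.
 */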
815
816 // Useful for finding partially full blocks in gdb
817 void findSlop(bdescr *bd);
818 void findSlop(bdescr *bd)
819 {
820 W_ slop;
821
822 for (; bd != NULL; bd = bd->link) {
823 slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
824 if (slop > (1024/sizeof(W_))) {
825 debugBelch("block at %p (bdescr %p) has %" FMT_Word "KB slop\n",
826 bd->start, bd, slop / (1024/sizeof(W_)));
827 }
828 }
829 }
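/*
 * For example, from gdb one might run (sketch):
 *
 *   (gdb) call findSlop(generations[0].blocks)
 *
 * to report every block descriptor in generation 0 carrying more than 1KB
 * of unused space.
 */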
830
831 static W_
832 genBlocks (generation *gen)
833 {
834 ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
835 ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
836 return gen->n_blocks + gen->n_old_blocks +
837 countAllocdBlocks(gen->large_objects);
838 }
839
840 void
841 memInventory (rtsBool show)
842 {
843 uint32_t g, i;
844 W_ gen_blocks[RtsFlags.GcFlags.generations];
845 W_ nursery_blocks, retainer_blocks,
846 arena_blocks, exec_blocks, gc_free_blocks = 0;
847 W_ live_blocks = 0, free_blocks = 0;
848 rtsBool leak;
849
850   // count the blocks we currently have
851
852 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
853 gen_blocks[g] = 0;
854 for (i = 0; i < n_capabilities; i++) {
855 gen_blocks[g] += countBlocks(capabilities[i]->mut_lists[g]);
856 gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
857 gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
858 gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
859 }
860 gen_blocks[g] += genBlocks(&generations[g]);
861 }
862
863 nursery_blocks = 0;
864 for (i = 0; i < n_nurseries; i++) {
865 ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
866 nursery_blocks += nurseries[i].n_blocks;
867 }
868 for (i = 0; i < n_capabilities; i++) {
869 gc_free_blocks += countBlocks(gc_threads[i]->free_blocks);
870 if (capabilities[i]->pinned_object_block != NULL) {
871 nursery_blocks += capabilities[i]->pinned_object_block->blocks;
872 }
873 nursery_blocks += countBlocks(capabilities[i]->pinned_object_blocks);
874 }
875
876 retainer_blocks = 0;
877 #ifdef PROFILING
878 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
879 retainer_blocks = retainerStackBlocks();
880 }
881 #endif
882
883 // count the blocks allocated by the arena allocator
884 arena_blocks = arenaBlocks();
885
886 // count the blocks containing executable memory
887 exec_blocks = countAllocdBlocks(exec_block);
888
889 /* count the blocks on the free list */
890 free_blocks = countFreeList();
891
892 live_blocks = 0;
893 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
894 live_blocks += gen_blocks[g];
895 }
896   live_blocks += nursery_blocks + retainer_blocks +
897                  arena_blocks + exec_blocks + gc_free_blocks;
898
899 #define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
900
901 leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
902
903 if (show || leak)
904 {
905 if (leak) {
906 debugBelch("Memory leak detected:\n");
907 } else {
908 debugBelch("Memory inventory:\n");
909 }
910 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
911 debugBelch(" gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n", g,
912 gen_blocks[g], MB(gen_blocks[g]));
913 }
914 debugBelch(" nursery : %5" FMT_Word " blocks (%6.1lf MB)\n",
915 nursery_blocks, MB(nursery_blocks));
916 debugBelch(" retainer : %5" FMT_Word " blocks (%6.1lf MB)\n",
917 retainer_blocks, MB(retainer_blocks));
918 debugBelch(" arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
919 arena_blocks, MB(arena_blocks));
920 debugBelch(" exec : %5" FMT_Word " blocks (%6.1lf MB)\n",
921 exec_blocks, MB(exec_blocks));
922 debugBelch(" GC free pool : %5" FMT_Word " blocks (%6.1lf MB)\n",
923 gc_free_blocks, MB(gc_free_blocks));
924 debugBelch(" free : %5" FMT_Word " blocks (%6.1lf MB)\n",
925 free_blocks, MB(free_blocks));
926 debugBelch(" total : %5" FMT_Word " blocks (%6.1lf MB)\n",
927 live_blocks + free_blocks, MB(live_blocks+free_blocks));
928 if (leak) {
929 debugBelch("\n in system : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
930 (W_)(mblocks_allocated * BLOCKS_PER_MBLOCK), mblocks_allocated);
931 }
932 }
933
934 if (leak) {
935 debugBelch("\n");
936 findMemoryLeak();
937 }
938 ASSERT(n_alloc_blocks == live_blocks);
939 ASSERT(!leak);
940 }
941
942
943 #endif /* DEBUG */