/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

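/*
 * Note on bitmaps: in stack-frame and argument bitmaps a 0 bit denotes a
 * pointer word and a 1 bit a non-pointer, so the checkers below only
 * follow the slots whose bit is clear.
 */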
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    StgPtr p;
    nat i;

    p = payload;
    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that a closure looks valid, without checking its payload.
 * Used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
            RET_DYN_NONPTR_REGS_SIZE +
            RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
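        /* fall through to the small-bitmap cases below */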
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
      // small bitmap cases (<= 32 entries)
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).", c, info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

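/*
 * Check the payload of a PAP or AP against the argument bitmap of the
 * applied function, and check that the closure tag of the function is
 * consistent with its arity.
 */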
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgClosure *p;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    p = (StgClosure *)payload;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}

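// Check a single closure and return its size in words.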
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case BLACKHOLE:
    case CAF_BLACKHOLE:
    case STABLE_NAME:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
        return sizeofW(StgTVarWatchQueue);
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
        return sizeofW(StgInvariantCheckQueue);
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
        return sizeofW(StgAtomicInvariant);
    }

    case TVAR:
    {
        StgTVar *tv = (StgTVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
        return sizeofW(StgTVar);
    }

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = (StgTRecHeader *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec->enclosing_trec));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec->current_chunk));
        return sizeofW(StgTRecHeader);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            p += size;

            /* skip over slop */
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
        }
    }
}

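// Check a contiguous chunk of heap between start and end.  Unlike
// checkHeap(), this assumes the chunk contains no slop between closures.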
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

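// Check each object on a large-object list.  Blocks flagged BF_PINNED are
// skipped (a pinned block need not start with a single well-formed closure).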
void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

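// Check an individual TSO: relocated TSOs are followed to their new
// location, and dead (ThreadKilled) threads are not checked.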
void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->_link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(stack <= sp && sp < stack_end);

    checkStackChunk(sp, stack_end);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso = generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl(tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            while (tso->what_next == ThreadRelocated) {
                tso = tso->_link;
            }

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

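// Check that everything on a generation's mutable list lives in that
// generation.  TSOs found on the list are flagged with TSO_MARKED so that
// checkGlobalTSOList() can verify that dirty TSOs really are on a mutable
// list.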
void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            if (get_itbl(p)->type == TSO) {
                ((StgTSO *)p)->flags |= TSO_MARKED;
            }
        }
    }
}

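// Walk the global and per-capability mutable lists of every generation,
// then check the global TSO list.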
void
checkMutableLists (rtsBool checkTSOs)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(generations[g].mut_list, g);
        for (i = 0; i < n_capabilities; i++) {
            checkMutableList(capabilities[i].mut_lists[g], g);
        }
    }
    checkGlobalTSOList(checkTSOs);
}

/*
   Check the static objects list.
*/
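// Each static closure type keeps its list link in a different place, hence
// the per-type cases below for following the static link field.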
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}


/* Full heap sanity check. */
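/*
 * Typically invoked from the garbage collector when RTS sanity checking
 * is enabled (the -DS debug flag); a sketch of an assumed call site:
 *
 *     IF_DEBUG(sanity, checkSanity(rtsTrue));
 *
 * When check_heap is rtsFalse, the per-closure heap walk is skipped and
 * only block counts, large objects, nurseries, the free list and the
 * mutable lists are checked.
 */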
void
checkSanity( rtsBool check_heap )
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        ASSERT(countBlocks(generations[g].blocks)
               == generations[g].n_blocks);
        ASSERT(countBlocks(generations[g].large_objects)
               == generations[g].n_large_blocks);
        if (check_heap) {
            checkHeap(generations[g].blocks);
        }
        checkLargeObjects(generations[g].large_objects);
    }

    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }

    checkFreeListSanity();

#if defined(THREADED_RTS)
    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    checkMutableLists(rtsTrue);
#else
    if (check_heap) {
        checkMutableLists(rtsFalse);
    } else {
        checkMutableLists(rtsTrue);
    }
#endif
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and searching through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    nat g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
        }
        markBlocks(generations[g].mut_list);
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}


/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    lnat slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}

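// Count the blocks belonging to a generation, checking the cached
// n_blocks/n_large_blocks counts along the way.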
static lnat
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
        countAllocdBlocks(gen->large_objects);
}

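// Count up every block we know about (generations, nurseries, retainer
// profiling, arenas, executable memory and the free list); the total
// should account for exactly mblocks_allocated * BLOCKS_PER_MBLOCK blocks,
// otherwise we have a leak.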
void
memInventory (rtsBool show)
{
    nat g, i;
    lnat gen_blocks[RtsFlags.GcFlags.generations];
    lnat nursery_blocks, retainer_blocks,
        arena_blocks, exec_blocks;
    lnat live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
        }
        gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
        retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5lu blocks (%lu MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5lu blocks (%lu MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5lu blocks (%lu MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
                       mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */