/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );
static void checkSTACK          ( StgStack *stack );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

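/*
 * Check the payload of a frame described by a small bitmap: a clear bit
 * means the corresponding payload word is a pointer, so check it as a
 * (shallow) closure.
 */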
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for (i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

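/*
 * As checkSmallBitmap, but for payloads described by a large bitmap
 * (too many entries to fit in one word): walk the StgLargeBitmap one
 * word at a time.
 */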
static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that p looks like a valid closure, without checking its payload;
 * used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// Check an individual stack frame; returns the size of the frame in words.
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
               RET_DYN_NONPTR_REGS_SIZE +
               RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}

// Check a section of stack: all the frames between sp and stack_end.
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

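/*
 * Check the function and argument payload of a PAP or AP closure.  Which
 * payload words are pointers is determined by the function's bitmap (or
 * the BCO's bitmap in the ARG_BCO case).  Finally, check that the pointer
 * tag on the function is consistent with its arity.
 */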
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}

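/*
 * Check a single closure: verify that its pointer fields look like valid
 * closure pointers, and return its size in words.  Stack frames must not
 * be passed here; they are handled by checkStackFrame().
 */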
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl(bq->owner)->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl(bq->link)->type == IND ||
               get_itbl(bq->link)->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}

/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        if (!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                nat size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}

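// Check a contiguous chunk of heap between start and end, where closures
// are expected to be laid out back-to-back with no intervening slop.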
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

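// Check each object on a list of large-object blocks.  Pinned blocks are
// skipped; unlike ordinary large objects they are not necessarily laid
// out as a single closure starting at bd->start.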
void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

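// Check a STACK object: sp must lie within the stack, and every frame
// between sp and the end of the stack must be sane.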
static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}

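// Check a TSO: its queue links, blocking information and stack object.
// Dead (ThreadKilled) threads are skipped, because the GC no longer
// follows their pointers.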
void
checkTSO(StgTSO *tso)
{
    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(tso->_link == END_TSO_QUEUE ||
           tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
           tso->_link->header.info == &stg_TSO_info);

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso = generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl(tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it had
            // better be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

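// Check one generation's mutable list: every heap-allocated object on it
// must belong to that generation.  Any TSO found here is marked with
// TSO_MARKED, which checkGlobalTSOList() uses to verify that dirty TSOs
// in old generations really are on a mutable list.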
void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            if (get_itbl(p)->type == TSO) {
                ((StgTSO *)p)->flags |= TSO_MARKED;
            }
        }
    }
}

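// Check the mutable lists of every capability, for every generation, and
// then the global TSO list.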
void
checkMutableLists (rtsBool checkTSOs)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            checkMutableList(capabilities[i].mut_lists[g], g);
        }
    }
    checkGlobalTSOList(checkTSOs);
}

/*
   Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}


/* Full heap sanity check. */
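/*
 * Check per-generation block counts, the heap contents (optionally), the
 * nurseries, and the block allocator's free list.
 *
 * Note: sanity checking is normally driven by the RTS sanity debug flag
 * (-DS, i.e. DebugFlags.sanity); the scheduler and GC then call
 * checkSanity() at suitable points (callers live outside this file).
 */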
void
checkSanity( rtsBool check_heap )
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        ASSERT(countBlocks(generations[g].blocks)
               == generations[g].n_blocks);
        ASSERT(countBlocks(generations[g].large_objects)
               == generations[g].n_large_blocks);
        if (check_heap) {
            checkHeap(generations[g].blocks);
        }
        checkLargeObjects(generations[g].large_objects);
    }

    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }

    checkFreeListSanity();

#if defined(THREADED_RTS)
    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    checkMutableLists(rtsTrue);
#else
    if (check_heap) {
        checkMutableLists(rtsFalse);
    } else {
        checkMutableLists(rtsTrue);
    }
#endif
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and searching through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    nat g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
        }
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

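// Check that a capability's run queue is a well-formed doubly-linked
// list: each TSO's block_info.prev must point to its predecessor, and
// run_queue_tl must point to the last element.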
void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    lnat slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}

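// Count the blocks owned by a generation (ordinary, old and large-object
// blocks), checking the cached block counts along the way.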
static lnat
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}

void
memInventory (rtsBool show)
{
    nat g, i;
    lnat gen_blocks[RtsFlags.GcFlags.generations];
    lnat nursery_blocks, retainer_blocks,
         arena_blocks, exec_blocks;
    lnat live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
        }
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
                   retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5lu blocks (%lu MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5lu blocks (%lu MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5lu blocks (%lu MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
                       mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */