Fix the RTS debug_p build
ghc.git: rts/sm/Sanity.c
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
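
/* Note: this file is only compiled into a DEBUG RTS (see the #ifdef
 * below).  checkSanity() further down is the top-level entry point for a
 * full heap check; it is normally driven by the sanity-checking debug
 * flag of a DEBUG build (usually -DS).
 */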

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

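/* In the layout bitmaps walked below, a 0 bit means the corresponding
 * stack word is a pointer (and so gets sanity-checked), a 1 bit means it
 * is a non-pointer.  Small bitmaps pack the size and the bits into a
 * single word (see BITMAP_SIZE/BITMAP_BITS); large bitmaps carry an
 * explicit size and an array of bitmap words.
 */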
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    StgPtr p;
    nat i;

    p = payload;
    for(i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp=0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that something looks like a valid closure, without checking its
 * payload; this is used to avoid recursion between checking PAPs and
 * checking stack chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
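        /* A RET_DYN frame, as walked below, consists of the StgRetDyn
         * header (whose liveness field holds the mask), then
         * RET_DYN_BITMAP_SIZE payload words described by that mask,
         * RET_DYN_NONPTR_REGS_SIZE words of saved non-pointer registers,
         * then the dynamic non-pointer words and finally the dynamic
         * pointer words.
         */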
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
               RET_DYN_NONPTR_REGS_SIZE +
               RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
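        /* fall through: the rest of an update frame, like the frames
           below, has a small-bitmap layout */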
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",
             c, info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

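/* A PAP or AP consists of a function closure plus a saved prefix of its
 * arguments; those argument words are laid out according to the
 * function's argument bitmap, which is what checkPAP walks below.
 */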
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgClosure *p;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    p = (StgClosure *)payload;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

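    /* Dynamic pointer tagging: if the function's arity fits in the tag
     * bits, its closure pointer is expected to carry the arity as its tag;
     * otherwise the pointer should be untagged.
     */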
    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}


StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl(bq->owner)->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || get_itbl(bq->queue)->type == TSO);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl(bq->link)->type == IND ||
               get_itbl(bq->link)->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
        { StgWeak *w = (StgWeak *)p;
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
          if (w->link) {
              ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
          }
          return sizeW_fromITBL(info);
        }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            p += size;

            /* skip over slop */
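            /* (in this non-threaded DEBUG configuration the slop left by
               updates is zeroed, see Updates.h, so it shows up below as
               words that cannot be info pointers) */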
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
        }
    }
}

void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->_link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(tso->_link == END_TSO_QUEUE ||
           tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
           tso->_link->header.info == &stg_TSO_info);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));

    ASSERT(stack <= sp && sp < stack_end);

    checkStackChunk(sp, stack_end);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
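/*
   checkMutableLists() calls checkMutableList() first, which sets TSO_MARKED
   on every TSO it finds on a mutable list; the loop below then insists that
   any dirty TSO living outside generation 0 carries that mark (i.e. really
   was on a mutable list), and clears the mark again.
*/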
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso=generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl(tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            tso = deRefTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            if (get_itbl(p)->type == TSO) {
                ((StgTSO *)p)->flags |= TSO_MARKED;
            }
        }
    }
}

void
checkMutableLists (rtsBool checkTSOs)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(generations[g].mut_list, g);
        for (i = 0; i < n_capabilities; i++) {
            checkMutableList(capabilities[i].mut_lists[g], g);
        }
    }
    checkGlobalTSOList(checkTSOs);
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}


/* Full heap sanity check. */
void
checkSanity( rtsBool check_heap )
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        ASSERT(countBlocks(generations[g].blocks)
               == generations[g].n_blocks);
        ASSERT(countBlocks(generations[g].large_objects)
               == generations[g].n_large_blocks);
        if (check_heap) {
            checkHeap(generations[g].blocks);
        }
        checkLargeObjects(generations[g].large_objects);
    }

    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }

    checkFreeListSanity();

#if defined(THREADED_RTS)
    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    checkMutableLists(rtsTrue);
#else
    if (check_heap) {
        checkMutableLists(rtsFalse);
    } else {
        checkMutableLists(rtsTrue);
    }
#endif
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and then searching through memory
// for blocks that are not marked.  In the debugger this can help to
// give us a clue about what kind of block leaked.  In the future we
// might annotate blocks with their allocation site to give more
// helpful info.
static void
findMemoryLeak (void)
{
    nat g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
        }
        markBlocks(generations[g].mut_list);
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

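/* The run queue is doubly linked: forwards through tso->_link and
 * backwards through tso->block_info.prev.  Check that the two directions
 * agree and that the tail pointer really is the last element.
 */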
void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
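// (illustrative use, e.g. from gdb one might say
//      (gdb) call findSlop(generations[0].blocks)
//  any bdescr chain can be passed as the argument)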
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    lnat slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}

static lnat
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}

void
memInventory (rtsBool show)
{
    nat g, i;
    lnat gen_blocks[RtsFlags.GcFlags.generations];
    lnat nursery_blocks, retainer_blocks,
         arena_blocks, exec_blocks;
    lnat live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
        }
        gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
                   retainer_blocks + arena_blocks + exec_blocks;

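// convert a block count into megabytes for the report below
// (BLOCK_SIZE_W is in words, so divide by the number of words per MB)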
#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery       : %5lu blocks (%lu MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer      : %5lu blocks (%lu MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks  : %5lu blocks (%lu MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec          : %5lu blocks (%lu MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free          : %5lu blocks (%lu MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total         : %5lu blocks (%lu MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system     : %5lu blocks (%lu MB)\n",
                       mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */