Remove the IND_OLDGEN and IND_OLDGEN_PERM closure types
[ghc.git] / rts / sm / Sanity.c
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void  checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void  checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void  checkClosureShallow ( StgClosure * );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

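/*
 * In a stack bitmap a clear (0) bit marks a pointer word and a set (1) bit
 * marks a non-pointer, so only the words whose bits are 0 are followed below.
 */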
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    StgPtr p;
    nat i;

    p = payload;
    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * check that it looks like a valid closure - without checking its payload
 * used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
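        // Layout of a RET_DYN frame, as traversed below: RET_DYN_BITMAP_SIZE
        // register-save words described by the liveness bitmap, then
        // RET_DYN_NONPTR_REGS_SIZE non-pointer register words, then
        // RET_DYN_NONPTRS(dyn) non-pointer words, and finally
        // RET_DYN_PTRS(dyn) pointer words.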
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p, RET_DYN_LIVENESS(r->liveness), RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
               RET_DYN_NONPTR_REGS_SIZE +
               RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
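        // fall through: an update frame also has a small-bitmap layout, so
        // its payload is checked along with the cases below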
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
      // small bitmap cases (<= 32 entries)
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).", c, info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgClosure *p;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    p = (StgClosure *)payload;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

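    // Pointer-tagging invariant: if the function's arity fits in the tag bits
    // (arity <= TAG_MASK) the pointer must be tagged with exactly that arity;
    // otherwise the tag must be 0.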
    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}


StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl(bq->owner)->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || get_itbl(bq->queue)->type == TSO);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl(bq->link)->type == IND ||
               get_itbl(bq->link)->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            p += size;

            /* skip over slop */
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
        }
    }
}

void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->_link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(tso->_link == END_TSO_QUEUE || get_itbl(tso->_link)->type == TSO);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));

    ASSERT(stack <= sp && sp < stack_end);

    checkStackChunk(sp, stack_end);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso = generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl(tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

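            // deRefTSO follows any ThreadRelocated indirections, so the
            // dirty-flag checks below look at the up-to-date TSO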
            tso = deRefTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

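/*
   checkMutableList marks each TSO it finds on a mutable list with TSO_MARKED;
   checkGlobalTSOList (called afterwards from checkMutableLists) uses and then
   clears that flag to verify that every dirty TSO outside generation 0 really
   is on a mutable list.
*/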
void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            if (get_itbl(p)->type == TSO) {
                ((StgTSO *)p)->flags |= TSO_MARKED;
            }
        }
    }
}

void
checkMutableLists (rtsBool checkTSOs)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(generations[g].mut_list, g);
        for (i = 0; i < n_capabilities; i++) {
            checkMutableList(capabilities[i].mut_lists[g], g);
        }
    }
    checkGlobalTSOList(checkTSOs);
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}


/* Full heap sanity check. */
void
checkSanity( rtsBool check_heap )
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        ASSERT(countBlocks(generations[g].blocks)
               == generations[g].n_blocks);
        ASSERT(countBlocks(generations[g].large_objects)
               == generations[g].n_large_blocks);
        if (check_heap) {
            checkHeap(generations[g].blocks);
        }
        checkLargeObjects(generations[g].large_objects);
    }

    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }

    checkFreeListSanity();

#if defined(THREADED_RTS)
    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    checkMutableLists(rtsTrue);
#else
    if (check_heap) {
        checkMutableLists(rtsFalse);
    } else {
        checkMutableLists(rtsTrue);
    }
#endif
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and search through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    nat g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
        }
        markBlocks(generations[g].mut_list);
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

void
checkRunQueue(Capability *cap)
{
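    // The run queue is doubly linked: _link points forwards and
    // block_info.prev points backwards.  Check both directions and that
    // run_queue_tl really is the last element.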
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    lnat slop;

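    // slop is the number of unused words at the end of a block group;
    // 1024/sizeof(W_) words is 1KB, so only groups with more than 1KB of
    // slop are reported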
    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}

static lnat
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}

void
memInventory (rtsBool show)
{
    nat g, i;
    lnat gen_blocks[RtsFlags.GcFlags.generations];
    lnat nursery_blocks, retainer_blocks,
         arena_blocks, exec_blocks;
    lnat live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
        }
        gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
                   retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

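    // Every block in every allocated megablock should be accounted for,
    // either in the live count above or on the free list; anything else
    // has leaked.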
    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5lu blocks (%lu MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5lu blocks (%lu MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5lu blocks (%lu MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
                       mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */