/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "GCThread.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"
#include "CNF.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void  checkSmallBitmap    ( StgPtr payload, StgWord bitmap, uint32_t );
static void  checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, uint32_t );
static void  checkClosureShallow ( const StgClosure * );
static void  checkSTACK          (StgStack *stack);

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */
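
/*
 * Note on bitmaps: a set bit (1) marks a non-pointer word and a clear bit
 * (0) marks a pointer, so only the 0-bit slots are passed to
 * checkClosureShallow() below.  For example, a small bitmap of 0b001 with
 * size 3 means payload[0] is a raw word and payload[1..2] are closure
 * pointers.
 */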

static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, uint32_t size )
{
    uint32_t i;

    for(i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, uint32_t size )
{
    StgWord bmp;
    uint32_t i, j;

    i = 0;
    for (bmp=0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * check that it looks like a valid closure - without checking its payload
 * used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( const StgClosure* p )
{
    const StgClosure *q;

    q = UNTAG_CONST_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    uint32_t size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
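        /* fall through to the small-bitmap cases below */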
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
    // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        uint32_t size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        const StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    const StgClosure *fun;
    const StgFunInfoTable *fun_info;

    fun = UNTAG_CONST_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

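    // Pointer-tagging invariant: a function whose arity fits in the tag
    // bits carries its arity as the pointer tag; larger arities leave the
    // pointer untagged.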
    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
                                        : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}

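
// Check a single closure and return its size in words; callers such as
// checkHeapChain() use the returned size to step to the next object.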
StgOffset
checkClosure( const StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CONST_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        uint32_t i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case TVAR:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        uint32_t i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
        { StgWeak *w = (StgWeak *)p;
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
          if (w->link) {
              ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
          }
          return sizeW_fromITBL(info);
        }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrBytes *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        uint32_t i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
    {
        uint32_t i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc -> next_entry_idx; i ++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */
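
/*
 * Note: a block may also contain slop - gaps left when a closure is
 * overwritten by a smaller one (e.g. on thunk update).  In the
 * non-threaded DEBUG RTS the slop is zeroed, which is why checkHeapChain()
 * below skips words that do not look like info pointers; see the comment
 * in checkGeneration() about why this is unreliable in the threaded RTS.
 */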

void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
        if(!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                uint32_t size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}

void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    uint32_t size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

static void
checkCompactObjects(bdescr *bd)
{
    // Compact objects are similar to large objects,
    // but they have a StgCompactNFDataBlock at the beginning,
    // before the actual closure

    for ( ; bd != NULL; bd = bd->link) {
        StgCompactNFDataBlock *block, *last;
        StgCompactNFData *str;
        StgWord totalW;

        ASSERT (bd->flags & BF_COMPACT);

        block = (StgCompactNFDataBlock*)bd->start;
        str = block->owner;
        ASSERT ((W_)str == (W_)block + sizeof(StgCompactNFDataBlock));

        totalW = 0;
        for ( ; block ; block = block->next) {
            last = block;
            ASSERT (block->owner == str);

            totalW += Bdescr((P_)block)->blocks * BLOCK_SIZE_W;
        }

        ASSERT (str->totalW == totalW);
        ASSERT (str->last == last);
    }
}

static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}

void
checkTSO(StgTSO *tso)
{
    StgTSO *next;
    const StgInfoTable *info;

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    next = tso->_link;
    info = (const StgInfoTable*) tso->_link->header.info;

    ASSERT(next == END_TSO_QUEUE ||
           info == &stg_MVAR_TSO_QUEUE_info ||
           info == &stg_TSO_info ||
           info == &stg_WHITEHOLE_info); // happens due to STM doing lockTSO()

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnMVarRead
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    uint32_t g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso=generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }

            {
                StgStack *stack;
                StgUnderflowFrame *frame;

                stack = tso->stackobj;
                while (1) {
                    if (stack->dirty & 1) {
                        ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                        stack->dirty &= ~TSO_MARKED;
                    }
                    frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                  - sizeofW(StgUnderflowFrame));
                    if (frame->info != &stg_stack_underflow_frame_info
                        || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                    stack = frame->next_chunk;
                }
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */
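
/*
 * Note: checkMutableList() marks every dirty TSO and STACK that it finds
 * on a mutable list with TSO_MARKED; checkGlobalTSOList() then asserts
 * that each dirty TSO/STACK living outside generation 0 carries the mark
 * (i.e. really was on a mutable list) and clears it again.  checkSanity()
 * runs the two checks in that order after a GC.
 */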

static void
checkMutableList( bdescr *mut_bd, uint32_t gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            checkClosure(p);

            switch (get_itbl(p)->type) {
            case TSO:
                ((StgTSO *)p)->flags |= TSO_MARKED;
                break;
            case STACK:
                ((StgStack *)p)->dirty |= TSO_MARKED;
                break;
            }
        }
    }
}

static void
checkLocalMutableLists (uint32_t cap_no)
{
    uint32_t g;
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(capabilities[cap_no]->mut_lists[g], g);
    }
}

static void
checkMutableLists (void)
{
    uint32_t i;
    for (i = 0; i < n_capabilities; i++) {
        checkLocalMutableLists(i);
    }
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    const StgInfoTable *info;

    while (p != END_OF_STATIC_OBJECT_LIST) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            const StgClosure *indirectee;

            indirectee = UNTAG_CONST_CLOSURE(((StgIndStatic *)p)->indirectee);
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    uint32_t blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->gen == g0);
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}

static void checkGeneration (generation *gen,
                             rtsBool after_major_gc USED_IF_THREADS)
{
    uint32_t n;
    gen_workspace *ws;

    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).  However, we can sanity-check
    // the heap after a major gc, because there is no slop.
    if (!after_major_gc) return;
#endif

    checkHeapChain(gen->blocks);

    for (n = 0; n < n_capabilities; n++) {
        ws = &gc_threads[n]->gens[gen->no];
        checkHeapChain(ws->todo_bd);
        checkHeapChain(ws->part_list);
        checkHeapChain(ws->scavd_list);
    }

    checkLargeObjects(gen->large_objects);
    checkCompactObjects(gen->compact_objects);
}

/* Full heap sanity check. */
static void checkFullHeap (rtsBool after_major_gc)
{
    uint32_t g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkGeneration(&generations[g], after_major_gc);
    }
    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }
}

void checkSanity (rtsBool after_gc, rtsBool major_gc)
{
    checkFullHeap(after_gc && major_gc);

    checkFreeListSanity();

    // always check the stacks in threaded mode, because checkGeneration()
    // above does nothing in this case (except after a major GC).
    if (after_gc) {
        checkMutableLists();
        checkGlobalTSOList(rtsTrue);
    }
}

static void
markCompactBlocks(bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        compactMarkKnown(((StgCompactNFDataBlock*)bd->start)->owner);
    }
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and searching through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    uint32_t g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i]->mut_lists[g]);
            markBlocks(gc_threads[i]->gens[g].part_list);
            markBlocks(gc_threads[i]->gens[g].scavd_list);
            markBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
        markCompactBlocks(generations[g].compact_objects);
    }

    for (i = 0; i < n_nurseries; i++) {
        markBlocks(nurseries[i].blocks);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(gc_threads[i]->free_blocks);
        markBlocks(capabilities[i]->pinned_object_block);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    uint32_t n;
    for (n = 0, tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link, n++) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
    ASSERT(cap->n_run_queue == n);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */
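
/*
 * Rough accounting (a sketch): every megablock contributes
 * BLOCKS_PER_MBLOCK blocks (typically 252 on 64-bit platforms, since part
 * of each megablock holds block descriptors), so the inventory below
 * expects
 *
 *     live_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK
 *
 * and reports a leak otherwise.
 */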

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    W_ slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %" FMT_Word "KB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}
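
/* For example, one might call this from gdb with
 *     (gdb) call findSlop(generations[1].blocks)
 * to report any blocks in generation 1 carrying more than 1KB of slop.
 */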

static W_
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    ASSERT(countCompactBlocks(gen->compact_objects) == gen->n_compact_blocks);
    ASSERT(countCompactBlocks(gen->compact_blocks_in_import) == gen->n_compact_blocks_in_import);
    return gen->n_blocks + gen->n_old_blocks +
        countAllocdBlocks(gen->large_objects) +
        gen->n_compact_blocks + gen->n_compact_blocks_in_import;
}

void
memInventory (rtsBool show)
{
    uint32_t g, i;
    W_ gen_blocks[RtsFlags.GcFlags.generations];
    W_ nursery_blocks, retainer_blocks,
        arena_blocks, exec_blocks, gc_free_blocks = 0;
    W_ live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i]->mut_lists[g]);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_nurseries; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }
    for (i = 0; i < n_capabilities; i++) {
        gc_free_blocks += countBlocks(gc_threads[i]->free_blocks);
        if (capabilities[i]->pinned_object_block != NULL) {
            nursery_blocks += capabilities[i]->pinned_object_block->blocks;
        }
        nursery_blocks += countBlocks(capabilities[i]->pinned_object_blocks);
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
        + retainer_blocks + arena_blocks + exec_blocks + gc_free_blocks;

#define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  GC free pool : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   gc_free_blocks, MB(gc_free_blocks));
        debugBelch("  free         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
                       (W_)(mblocks_allocated * BLOCKS_PER_MBLOCK), mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */