/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#if defined(DEBUG) /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "GCThread.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"
#include "CNF.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, uint32_t );
static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, uint32_t );
static void checkClosureShallow ( const StgClosure * );
static void checkSTACK          (StgStack *stack);

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, uint32_t size )
{
    uint32_t i;

    for(i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, uint32_t size )
{
    StgWord bmp;
    uint32_t i, j;

    i = 0;
    for (bmp=0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that it looks like a valid closure - without checking its payload.
 * Used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */
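/*
 * (Note: this is only the shallow variant. checkClosure() below performs the
 * full check, following the payload as well; callers that want the deep check
 * should use that instead.)
 */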

static void
checkClosureShallow( const StgClosure* p )
{
    const StgClosure *q;

    q = UNTAG_CONST_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
}

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    uint32_t size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
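    /* (In these bitmaps a clear bit means the corresponding word of the frame
     * is a heap pointer and must look like a closure; a set bit marks a
     * non-pointer word, which checkSmallBitmap/checkLargeBitmap simply skip.) */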
    switch (info->i.type) {

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fallthrough */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
    // small bitmap cases (bitmap fits in a single word)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        uint32_t size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (does not fit in a single word)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        const StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    const StgClosure *fun;
    const StgFunInfoTable *fun_info;

    fun = UNTAG_CONST_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

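    // Pointer-tagging invariant (descriptive note): when the function's arity
    // is small enough to fit in the pointer tag bits (arity <= TAG_MASK), the
    // tag of the function pointer must equal the arity; otherwise the pointer
    // must be untagged.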
    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}


StgOffset
checkClosure( const StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CONST_CLOSURE(p);

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        uint32_t i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_NOCAF:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case TVAR:
    case THUNK_STATIC:
    case FUN_STATIC:
    case COMPACT_NFDATA:
    {
        uint32_t i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
        { StgWeak *w = (StgWeak *)p;
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
          if (w->link) {
              ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
          }
          return sizeW_fromITBL(info);
        }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrBytes *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        uint32_t i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
    {
        uint32_t i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc -> next_entry_idx; i ++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place. This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
        if(!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                uint32_t size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
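                /* (Slop arises when a closure is overwritten in place by a
                 * smaller one, e.g. a thunk updated with an indirection; in
                 * the DEBUG RTS the leftover words are zeroed (see the note in
                 * checkGeneration() about the threaded RTS), so scan forward
                 * word by word until something that looks like an info
                 * pointer turns up.) */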
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}

void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    uint32_t size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

static void
checkCompactObjects(bdescr *bd)
{
    // Compact objects are similar to large objects,
    // but they have a StgCompactNFDataBlock at the beginning,
    // before the actual closure
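    //
    // Rough layout of the first block of a compact (a sketch; see CNF.c for
    // the authoritative picture):
    //
    //   | StgCompactNFDataBlock | StgCompactNFData (owner) | compacted data ... |
    //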

    for ( ; bd != NULL; bd = bd->link) {
        StgCompactNFDataBlock *block, *last;
        StgCompactNFData *str;
        StgWord totalW;

        ASSERT(bd->flags & BF_COMPACT);

        block = (StgCompactNFDataBlock*)bd->start;
        str = block->owner;
        ASSERT((W_)str == (W_)block + sizeof(StgCompactNFDataBlock));

        totalW = 0;
        for ( ; block ; block = block->next) {
            last = block;
            ASSERT(block->owner == str);

            totalW += Bdescr((P_)block)->blocks * BLOCK_SIZE_W;
        }

        ASSERT(str->totalW == totalW);
        ASSERT(str->last == last);
    }
}

static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}

void
checkTSO(StgTSO *tso)
{
    StgTSO *next;
    const StgInfoTable *info;

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    next = tso->_link;
    info = (const StgInfoTable*) tso->_link->header.info;

    ASSERT(next == END_TSO_QUEUE ||
           info == &stg_MVAR_TSO_QUEUE_info ||
           info == &stg_TSO_info ||
           info == &stg_WHITEHOLE_info); // happens due to STM doing lockTSO()

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnMVarRead
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (bool checkTSOs)
{
    StgTSO *tso;
    uint32_t g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso=generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }

            {
                StgStack *stack;
                StgUnderflowFrame *frame;

                stack = tso->stackobj;
                while (1) {
                    if (stack->dirty & 1) {
                        ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                        stack->dirty &= ~TSO_MARKED;
                    }
                    frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                  - sizeofW(StgUnderflowFrame));
                    if (frame->info != &stg_stack_underflow_frame_info
                        || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                    stack = frame->next_chunk;
                }
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

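// Note: as a side effect, TSOs and STACKs found on a mutable list are flagged
// with TSO_MARKED here; checkGlobalTSOList() later asserts that dirty objects
// in old generations carry the flag and then clears it again.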
static void
checkMutableList( bdescr *mut_bd, uint32_t gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            checkClosure(p);

            switch (get_itbl(p)->type) {
            case TSO:
                ((StgTSO *)p)->flags |= TSO_MARKED;
                break;
            case STACK:
                ((StgStack *)p)->dirty |= TSO_MARKED;
                break;
            }
        }
    }
}

static void
checkLocalMutableLists (uint32_t cap_no)
{
    uint32_t g;
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(capabilities[cap_no]->mut_lists[g], g);
    }
}

static void
checkMutableLists (void)
{
    uint32_t i;
    for (i = 0; i < n_capabilities; i++) {
        checkLocalMutableLists(i);
    }
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    const StgInfoTable *info;

    while (p != END_OF_STATIC_OBJECT_LIST) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            const StgClosure *indirectee;

            indirectee = UNTAG_CONST_CLOSURE(((StgIndStatic *)p)->indirectee);
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR:
        case CONSTR_NOCAF:
        case CONSTR_1_0:
        case CONSTR_2_0:
        case CONSTR_1_1:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
692 barf("checkStaticObjetcs: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    uint32_t blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->gen == g0);
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}

static void checkGeneration (generation *gen,
                             bool after_major_gc USED_IF_THREADS)
{
    uint32_t n;
    gen_workspace *ws;

    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h). However, we can sanity-check
    // the heap after a major gc, because there is no slop.
    if (!after_major_gc) return;
#endif

    checkHeapChain(gen->blocks);

    for (n = 0; n < n_capabilities; n++) {
        ws = &gc_threads[n]->gens[gen->no];
        checkHeapChain(ws->todo_bd);
        checkHeapChain(ws->part_list);
        checkHeapChain(ws->scavd_list);
    }

    checkLargeObjects(gen->large_objects);
    checkCompactObjects(gen->compact_objects);
}

/* Full heap sanity check. */
static void checkFullHeap (bool after_major_gc)
{
    uint32_t g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkGeneration(&generations[g], after_major_gc);
    }
    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }
}

void checkSanity (bool after_gc, bool major_gc)
{
    checkFullHeap(after_gc && major_gc);

    checkFreeListSanity();

    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    if (after_gc) {
        checkMutableLists();
        checkGlobalTSOList(true);
    }
}

static void
markCompactBlocks(bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        compactMarkKnown(((StgCompactNFDataBlock*)bd->start)->owner);
    }
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and search through memory to find
// blocks that are not marked. In the debugger this can help to give
// us a clue about what kind of block leaked. In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
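// (How it works, briefly: the markBlocks()/markCompactBlocks() calls below
// flag every block descriptor we can account for, and reportUnmarkedBlocks()
// then walks the block allocator and reports any block that was not flagged.)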
static void
findMemoryLeak (void)
{
    uint32_t g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i]->mut_lists[g]);
            markBlocks(gc_threads[i]->gens[g].part_list);
            markBlocks(gc_threads[i]->gens[g].scavd_list);
            markBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
        markCompactBlocks(generations[g].compact_objects);
    }

    for (i = 0; i < n_nurseries; i++) {
        markBlocks(nurseries[i].blocks);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(gc_threads[i]->free_blocks);
        markBlocks(capabilities[i]->pinned_object_block);
    }

#if defined(PROFILING)
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

void
checkRunQueue(Capability *cap)
{
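    // (The run queue is a doubly-linked list: _link points forwards and
    // block_info.prev points backwards; the asserts below check both
    // directions as well as the cached length n_run_queue.)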
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    uint32_t n;
    for (n = 0, tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link, n++) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
    ASSERT(cap->n_run_queue == n);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
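// (A hypothetical gdb invocation, assuming a DEBUG RTS is loaded:
//     (gdb) call findSlop(generations[0].blocks)
//  generations[0].blocks is just an example argument; any bdescr chain works.)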
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    W_ slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %" FMT_Word "KB slop\n",
                       bd->start, bd, slop / (1024/(W_)sizeof(W_)));
        }
    }
}

static W_
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    ASSERT(countCompactBlocks(gen->compact_objects) == gen->n_compact_blocks);
    ASSERT(countCompactBlocks(gen->compact_blocks_in_import) == gen->n_compact_blocks_in_import);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects) +
           countAllocdCompactBlocks(gen->compact_objects) +
           countAllocdCompactBlocks(gen->compact_blocks_in_import);
}

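// The invariant being checked below (in units of blocks): every block the RTS
// has allocated must be accounted for somewhere, i.e.
//
//     live_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK
//
// where live_blocks sums the generations, nurseries, retainer/arena/exec
// blocks and the GC threads' free pools, and free_blocks is the block
// allocator's free list.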
void
memInventory (bool show)
{
    uint32_t g, i;
    W_ gen_blocks[RtsFlags.GcFlags.generations];
    W_ nursery_blocks, retainer_blocks,
        arena_blocks, exec_blocks, gc_free_blocks = 0;
    W_ live_blocks = 0, free_blocks = 0;
    bool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i]->mut_lists[g]);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_nurseries; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }
    for (i = 0; i < n_capabilities; i++) {
        gc_free_blocks += countBlocks(gc_threads[i]->free_blocks);
        if (capabilities[i]->pinned_object_block != NULL) {
            nursery_blocks += capabilities[i]->pinned_object_block->blocks;
        }
        nursery_blocks += countBlocks(capabilities[i]->pinned_object_blocks);
    }

    retainer_blocks = 0;
#if defined(PROFILING)
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
        retainer_blocks + arena_blocks + exec_blocks + gc_free_blocks;

#define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  GC free pool : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   gc_free_blocks, MB(gc_free_blocks));
        debugBelch("  free         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
                       (W_)(mblocks_allocated * BLOCKS_PER_MBLOCK), mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */