/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
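/* Note (an illustrative addition, not from the original header): this
 * file is only compiled into the DEBUG RTS, and the checks below are
 * typically requested at run time with the sanity debug flag, e.g.
 *
 *     ./MyProgram +RTS -DS        -- MyProgram is a placeholder name
 *
 * where -DS asks the debug RTS to run sanity checks between GCs.
 */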

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "GCThread.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );
static void checkSTACK          ( StgStack *stack );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for(i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}
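
/* An illustrative example (not from the original source): in these
 * layout bitmaps a set bit marks a non-pointer word and a clear bit
 * marks a pointer.  With size == 4 and bitmap == 0b0110:
 *
 *     payload[0]  bit == 0  -> checked with checkClosureShallow()
 *     payload[1]  bit == 1  -> skipped (non-pointer word)
 *     payload[2]  bit == 1  -> skipped (non-pointer word)
 *     payload[3]  bit == 0  -> checked with checkClosureShallow()
 */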

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp=0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that it looks like a valid closure, without checking its
 * payload.  Used to avoid recursion between checking PAPs and checking
 * stack chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
      // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}
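
/* An illustrative note on the arity/tag assertion above (a sketch, not
 * from the original source): pointer tagging stores a function's arity
 * in the low bits of pointers to it when the arity fits in the tag.
 * On a 64-bit build TAG_MASK is 7, so for example:
 *
 *     arity 2 -> GET_CLOSURE_TAG(fun) == 2   (arity <= TAG_MASK)
 *     arity 9 -> GET_CLOSURE_TAG(fun) == 0   (arity >  TAG_MASK, untagged)
 */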


StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {   StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
        if(!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                nat size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}
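
/* An illustrative note on the "skip over slop" loop above (not from
 * the original source): slop is the run of dead words left behind when
 * a closure is overwritten by a smaller one, e.g. a large THUNK that
 * has been updated with an indirection.  The debug RTS zeroes those
 * words (see Updates.h), so anything that is a small value or does not
 * look like an info pointer can be stepped over until the next
 * plausible closure starts.
 */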

void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}

void
checkTSO(StgTSO *tso)
{
    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(tso->_link == END_TSO_QUEUE ||
           tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
           tso->_link->header.info == &stg_TSO_info);

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso=generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }

            {
                StgStack *stack;
                StgUnderflowFrame *frame;

                stack = tso->stackobj;
                while (1) {
                    if (stack->dirty & 1) {
                        ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                        stack->dirty &= ~TSO_MARKED;
                    }
                    frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                  - sizeofW(StgUnderflowFrame));
                    if (frame->info != &stg_stack_underflow_frame_info
                        || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                    stack = frame->next_chunk;
                }
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

static void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            checkClosure(p);

            switch (get_itbl(p)->type) {
            case TSO:
                ((StgTSO *)p)->flags |= TSO_MARKED;
                break;
            case STACK:
                ((StgStack *)p)->dirty |= TSO_MARKED;
                break;
            }
        }
    }
}

static void
checkLocalMutableLists (nat cap_no)
{
    nat g;
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(capabilities[cap_no].mut_lists[g], g);
    }
}

static void
checkMutableLists (void)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        checkLocalMutableLists(i);
    }
}

/*
   Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->gen == g0);
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}

static void checkGeneration (generation *gen,
                             rtsBool after_major_gc USED_IF_THREADS)
{
    nat n;
    gen_workspace *ws;

    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).  However, we can sanity-check
    // the heap after a major gc, because there is no slop.
    if (!after_major_gc) return;
#endif

    checkHeapChain(gen->blocks);

    for (n = 0; n < n_capabilities; n++) {
        ws = &gc_threads[n]->gens[gen->no];
        checkHeapChain(ws->todo_bd);
        checkHeapChain(ws->part_list);
        checkHeapChain(ws->scavd_list);
    }

    checkLargeObjects(gen->large_objects);
}

/* Full heap sanity check. */
static void checkFullHeap (rtsBool after_major_gc)
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkGeneration(&generations[g], after_major_gc);
    }
    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }
}

void checkSanity (rtsBool after_gc, rtsBool major_gc)
{
    checkFullHeap(after_gc && major_gc);

    checkFreeListSanity();

    // always check the stacks in threaded mode, because checkGeneration()
    // skips the heap checks in that case (it can only check the heap
    // after a major GC).
    if (after_gc) {
        checkMutableLists();
        checkGlobalTSOList(rtsTrue);
    }
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and searching through memory to
// find blocks that are not marked.  In the debugger this can help to
// give us a clue about what kind of block leaked.  In the future we
// might annotate blocks with their allocation site to give more
// helpful info.
static void
findMemoryLeak (void)
{
    nat g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
            markBlocks(gc_threads[i]->gens[g].part_list);
            markBlocks(gc_threads[i]->gens[g].scavd_list);
            markBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
        markBlocks(capabilities[i].pinned_object_block);
    }

#ifdef PROFILING
    // TODO:
    //   if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //       markRetainerBlocks();
    //   }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}
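
/* An illustrative note (not from the original source): the run queue
 * is doubly-linked, forwards via tso->_link and backwards via
 * tso->block_info.prev, e.g. for three queued threads:
 *
 *     run_queue_hd -> A <-> B <-> C <- run_queue_tl
 *
 * checkRunQueue() above walks the forward links, checking each back
 * link and finally that run_queue_tl points at the last element.
 */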

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    W_ slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %" FMT_SizeT "KB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}
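
/* Example usage (an illustrative sketch, not from the original
 * source): from gdb, slop in the oldest generation's blocks can be
 * inspected with something like
 *
 *     (gdb) call findSlop(generations[0].blocks)
 *
 * which prints each block in the chain carrying more than 1KB of slop.
 */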

static W_
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}

void
memInventory (rtsBool show)
{
    nat g, i;
    W_ gen_blocks[RtsFlags.GcFlags.generations];
    W_ nursery_blocks, retainer_blocks,
        arena_blocks, exec_blocks;
    W_ live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
        if (capabilities[i].pinned_object_block != NULL) {
            nursery_blocks += capabilities[i].pinned_object_block->blocks;
        }
        nursery_blocks += countBlocks(capabilities[i].pinned_object_blocks);
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
                   retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
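
/* An illustrative check of MB() (not from the original source):
 * BLOCK_SIZE_W is the block size in words, so with 4096-byte blocks on
 * a 64-bit platform (BLOCK_SIZE_W == 512, sizeof(W_) == 8):
 *
 *     MB(256) == 256.0 * 512 / (1048576 / 8) == 1.0
 *
 * i.e. 256 blocks of 4KB make exactly one megabyte.
 */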

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
                       mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */