/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "GCThread.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );
static void checkSTACK          ( StgStack *stack );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

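/*
 * Editorial note on the bitmap convention used below (an illustration, not
 * from the original source): in a frame bitmap a clear (0) bit means the
 * corresponding payload word is a pointer, which gets checked with
 * checkClosureShallow(); a set (1) bit marks a non-pointer word, which is
 * skipped.  For example, with size == 4 and bitmap == 0x6 (binary 0110),
 * payload[0] and payload[3] are checked as closure pointers while payload[1]
 * and payload[2] are left alone.  Large bitmaps follow the same convention,
 * spread across several StgWords.
 */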
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for (i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp=0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that a closure looks valid, without checking its payload.
 * Used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}
// check an individual stack frame; returns its size in words
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
            RET_DYN_NONPTR_REGS_SIZE +
            RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through: the rest of an update frame is checked like a
           small-bitmap frame */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}
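/*
 * Editorial usage note: the callers in this file pass the live portion of a
 * stack object, e.g. checkSTACK() below does
 *
 *     checkStackChunk(stack->sp, stack->stack + stack->stack_size);
 *
 * and the AP_STACK case of checkClosure() does
 *
 *     checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
 *
 * so the walk always starts on a frame boundary.
 */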

static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}
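/*
 * Editorial note on the final assertion (an illustration, not from the
 * original source): GHC's pointer tagging stores a function's arity in the
 * low bits of pointers to it, but only when the arity fits in the tag bits.
 * So a function of arity 2 should only ever be reached through pointers
 * tagged 2, whereas a function whose arity exceeds TAG_MASK must be left
 * untagged (tag 0).  The value of TAG_MASK depends on the word size
 * (typically 3 on 32-bit platforms and 7 on 64-bit ones).
 */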

StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}
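/*
 * Editorial note: checkClosure() returns the closure's size in words, which
 * is what lets checkHeapChain() and checkHeapChunk() below step from one
 * object to the next within a block.
 */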

/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
        if (!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                nat size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}
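/*
 * Editorial note (an interpretation, not from the original source): blocks
 * flagged BF_SWEPT were processed by the mark/sweep collector and may contain
 * gaps of dead data between live objects, so a linear walk from bd->start to
 * bd->free is not meaningful there; such blocks are skipped.  The inner
 * loop's slop test treats a word as padding when it is a small literal
 * (< 0x1000) or does not look like an info pointer.
 */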

void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}

void
checkTSO(StgTSO *tso)
{
    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(tso->_link == END_TSO_QUEUE ||
           tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
           tso->_link->header.info == &stg_TSO_info);

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso=generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it had better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }

            {
                StgStack *stack;
                StgUnderflowFrame *frame;

                stack = tso->stackobj;
                while (1) {
                    if (stack->dirty & 1) {
                        ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                        stack->dirty &= ~TSO_MARKED;
                    }
                    frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                  - sizeofW(StgUnderflowFrame));
                    if (frame->info != &stg_stack_underflow_frame_info
                        || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                    stack = frame->next_chunk;
                }
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

static void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            checkClosure(p);

            switch (get_itbl(p)->type) {
            case TSO:
                ((StgTSO *)p)->flags |= TSO_MARKED;
                break;
            case STACK:
                ((StgStack *)p)->dirty |= TSO_MARKED;
                break;
            }
        }
    }
}
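/*
 * Editorial note: TSO_MARKED is used here as a scratch flag.  checkSanity()
 * runs checkMutableLists() first, which sets the flag on every TSO and STACK
 * found on a mutable list; checkGlobalTSOList() above then asserts that any
 * dirty TSO or stack living outside generation 0 carries the flag (i.e.
 * really was on a mutable list) and clears it again.
 */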

static void
checkLocalMutableLists (nat cap_no)
{
    nat g;
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(capabilities[cap_no].mut_lists[g], g);
    }
}

static void
checkMutableLists (void)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        checkLocalMutableLists(i);
    }
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->gen == g0);
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}

static void checkGeneration (generation *gen,
                             rtsBool after_major_gc USED_IF_THREADS)
{
    nat n;
    gen_workspace *ws;

    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).  However, we can sanity-check
    // the heap after a major gc, because there is no slop.
    if (!after_major_gc) return;
#endif

    checkHeapChain(gen->blocks);

    for (n = 0; n < n_capabilities; n++) {
        ws = &gc_threads[n]->gens[gen->no];
        checkHeapChain(ws->todo_bd);
        checkHeapChain(ws->part_list);
        checkHeapChain(ws->scavd_list);
    }

    checkLargeObjects(gen->large_objects);
}

/* Full heap sanity check. */
static void checkFullHeap (rtsBool after_major_gc)
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkGeneration(&generations[g], after_major_gc);
    }
    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }
}

void checkSanity (rtsBool after_gc, rtsBool major_gc)
{
    checkFullHeap(after_gc && major_gc);

    checkFreeListSanity();

    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    if (after_gc) {
        checkMutableLists();
        checkGlobalTSOList(rtsTrue);
    }
}
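/*
 * Editorial usage note (a sketch; the actual call sites live elsewhere in
 * the RTS): checkSanity() is normally reached via the sanity debug flag
 * (+RTS -DS), e.g. from the collector with something like
 *
 *     IF_DEBUG(sanity, checkSanity(rtsTrue, major_gc));
 *
 * which only fires when RtsFlags.DebugFlags.sanity is set.
 */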

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and searching through memory to
// find blocks that are not marked.  In the debugger this can help to
// give us a clue about what kind of block leaked.  In the future we
// might annotate blocks with their allocation site to give more
// helpful info.
static void
findMemoryLeak (void)
{
    nat g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
            markBlocks(gc_threads[i]->gens[g].part_list);
            markBlocks(gc_threads[i]->gens[g].scavd_list);
            markBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
        markBlocks(capabilities[i].pinned_object_block);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    W_ slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %" FMT_SizeT "KB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}
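/*
 * Editorial usage note (illustrative; adjust to whichever block list you
 * care about): from gdb, with the RTS stopped, one might run e.g.
 *
 *     (gdb) call findSlop(generations[0].blocks)
 *
 * to list blocks in generation 0 carrying more than 1KB of unused space.
 */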

static W_
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}

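/*
 * Editorial note on the leak test below: every block in the system lives in
 * some megablock, so the inventory should satisfy
 *
 *     live_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK
 *
 * where live_blocks is everything memInventory() can account for
 * (generations, nurseries, pinned blocks, retainer/arena/exec blocks) and
 * free_blocks is the free list.  Any shortfall means blocks we have lost
 * track of; an excess would mean double counting.
 */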
void
memInventory (rtsBool show)
{
    nat g, i;
    W_ gen_blocks[RtsFlags.GcFlags.generations];
    W_ nursery_blocks, retainer_blocks,
       arena_blocks, exec_blocks;
    W_ live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
        if (capabilities[i].pinned_object_block != NULL) {
            nursery_blocks += capabilities[i].pinned_object_block->blocks;
        }
        nursery_blocks += countBlocks(capabilities[i].pinned_object_blocks);
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
                   retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
                       mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */