1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2006
4 *
5 * Sanity checking code for the heap and stack.
6 *
7  * Used when debugging: check that everything is reasonable.
8 *
9 * - All things that are supposed to be pointers look like pointers.
10 *
11  * - Objects in text space are marked as static closures; those
12  *   in the heap are dynamic.
13 *
14 * ---------------------------------------------------------------------------*/
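
/* These checks are normally driven from the garbage collector when
 * sanity checking is enabled in a DEBUG build.  A sketch from memory,
 * not copied from GC.c (the flag and call site are roughly):
 *
 *     IF_DEBUG(sanity, checkSanity(rtsTrue, major_gc));
 *
 * checkSanity() and the other entry points below can also be called
 * directly, e.g. from a debugger, at points where the heap is expected
 * to be consistent.
 */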
15
16 #include "PosixSource.h"
17 #include "Rts.h"
18
19 #ifdef DEBUG /* whole file */
20
21 #include "RtsUtils.h"
22 #include "sm/Storage.h"
23 #include "sm/BlockAlloc.h"
24 #include "GCThread.h"
25 #include "Sanity.h"
26 #include "Schedule.h"
27 #include "Apply.h"
28 #include "Printer.h"
29 #include "Arena.h"
30 #include "RetainerProfile.h"
31
32 /* -----------------------------------------------------------------------------
33 Forward decls.
34 -------------------------------------------------------------------------- */
35
36 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
37 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
38 static void checkClosureShallow ( StgClosure * );
39 static void checkSTACK (StgStack *stack);
40
41 /* -----------------------------------------------------------------------------
42 Check stack sanity
43 -------------------------------------------------------------------------- */
44
45 static void
46 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
47 {
48 StgPtr p;
49 nat i;
50
51 p = payload;
52 for(i = 0; i < size; i++, bitmap >>= 1 ) {
53 if ((bitmap & 1) == 0) {
54 checkClosureShallow((StgClosure *)payload[i]);
55 }
56 }
57 }
58
59 static void
60 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
61 {
62 StgWord bmp;
63 nat i, j;
64
65 i = 0;
66 for (bmp=0; i < size; bmp++) {
67 StgWord bitmap = large_bitmap->bitmap[bmp];
68 j = 0;
69 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
70 if ((bitmap & 1) == 0) {
71 checkClosureShallow((StgClosure *)payload[i]);
72 }
73 }
74 }
75 }
76
77 /*
78  * Check that something looks like a valid closure, without checking its
79  * payload. This is used to avoid recursion between checking PAPs and
80  * checking stack chunks.
81 */
82
83 static void
84 checkClosureShallow( StgClosure* p )
85 {
86 StgClosure *q;
87
88 q = UNTAG_CLOSURE(p);
89 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
90
91 /* Is it a static closure? */
92 if (!HEAP_ALLOCED(q)) {
93 ASSERT(closure_STATIC(q));
94 } else {
95 ASSERT(!closure_STATIC(q));
96 }
97 }
98
99 // check an individual stack frame, returning its size in words
100 StgOffset
101 checkStackFrame( StgPtr c )
102 {
103 nat size;
104 const StgRetInfoTable* info;
105
106 info = get_ret_itbl((StgClosure *)c);
107
108 /* All activation records have 'bitmap' style layout info. */
109 switch (info->i.type) {
110 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
111 {
112 StgWord dyn;
113 StgPtr p;
114 StgRetDyn* r;
115
116 r = (StgRetDyn *)c;
117 dyn = r->liveness;
118
119 p = (P_)(r->payload);
120 checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
121 p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
122
123 // skip over the non-pointers
124 p += RET_DYN_NONPTRS(dyn);
125
126 // follow the ptr words
127 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
128 checkClosureShallow((StgClosure *)*p);
129 p++;
130 }
131
132 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
133 RET_DYN_NONPTR_REGS_SIZE +
134 RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
135 }
136
137 case UPDATE_FRAME:
138 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
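      // fall through: after checking the updatee, an update frame is
      // handled like the other small-bitmap frames below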
139 case ATOMICALLY_FRAME:
140 case CATCH_RETRY_FRAME:
141 case CATCH_STM_FRAME:
142 case CATCH_FRAME:
143 // small bitmap cases (<= 32 entries)
144 case UNDERFLOW_FRAME:
145 case STOP_FRAME:
146 case RET_SMALL:
147 size = BITMAP_SIZE(info->i.layout.bitmap);
148 checkSmallBitmap((StgPtr)c + 1,
149 BITMAP_BITS(info->i.layout.bitmap), size);
150 return 1 + size;
151
152 case RET_BCO: {
153 StgBCO *bco;
154 nat size;
155 bco = (StgBCO *)*(c+1);
156 size = BCO_BITMAP_SIZE(bco);
157 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
158 return 2 + size;
159 }
160
161 case RET_BIG: // large bitmap (> 32 entries)
162 size = GET_LARGE_BITMAP(&info->i)->size;
163 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
164 return 1 + size;
165
166 case RET_FUN:
167 {
168 StgFunInfoTable *fun_info;
169 StgRetFun *ret_fun;
170
171 ret_fun = (StgRetFun *)c;
172 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
173 size = ret_fun->size;
174 switch (fun_info->f.fun_type) {
175 case ARG_GEN:
176 checkSmallBitmap((StgPtr)ret_fun->payload,
177 BITMAP_BITS(fun_info->f.b.bitmap), size);
178 break;
179 case ARG_GEN_BIG:
180 checkLargeBitmap((StgPtr)ret_fun->payload,
181 GET_FUN_LARGE_BITMAP(fun_info), size);
182 break;
183 default:
184 checkSmallBitmap((StgPtr)ret_fun->payload,
185 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
186 size);
187 break;
188 }
189 return sizeofW(StgRetFun) + size;
190 }
191
192 default:
193 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
194 }
195 }
196
197 // check a chunk of stack between sp and stack_end, one frame at a time
198 void
199 checkStackChunk( StgPtr sp, StgPtr stack_end )
200 {
201 StgPtr p;
202
203 p = sp;
204 while (p < stack_end) {
205 p += checkStackFrame( p );
206 }
207 // ASSERT( p == stack_end ); -- HWL
208 }
209
210 static void
211 checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
212 {
213 StgClosure *fun;
214 StgClosure *p;
215 StgFunInfoTable *fun_info;
216
217 fun = UNTAG_CLOSURE(tagged_fun);
218 ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
219 fun_info = get_fun_itbl(fun);
220
221 p = (StgClosure *)payload;
222 switch (fun_info->f.fun_type) {
223 case ARG_GEN:
224 checkSmallBitmap( (StgPtr)payload,
225 BITMAP_BITS(fun_info->f.b.bitmap), n_args );
226 break;
227 case ARG_GEN_BIG:
228 checkLargeBitmap( (StgPtr)payload,
229 GET_FUN_LARGE_BITMAP(fun_info),
230 n_args );
231 break;
232 case ARG_BCO:
233 checkLargeBitmap( (StgPtr)payload,
234 BCO_BITMAP(fun),
235 n_args );
236 break;
237 default:
238 checkSmallBitmap( (StgPtr)payload,
239 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
240 n_args );
241 break;
242 }
243
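    // Pointer-tagging invariant for functions: the tag holds the arity
    // when it fits in the tag bits (arity <= TAG_MASK), and is zero
    // otherwise; that is exactly what the ASSERT below checks.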
244 ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
245 : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
246 }
247
248
249 StgOffset
250 checkClosure( StgClosure* p )
251 {
252 const StgInfoTable *info;
253
254 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
255
256 p = UNTAG_CLOSURE(p);
257 /* Is it a static closure (i.e. in the data segment)? */
258 if (!HEAP_ALLOCED(p)) {
259 ASSERT(closure_STATIC(p));
260 } else {
261 ASSERT(!closure_STATIC(p));
262 }
263
264 info = p->header.info;
265
266 if (IS_FORWARDING_PTR(info)) {
267 barf("checkClosure: found EVACUATED closure %d", info->type);
268 }
269 info = INFO_PTR_TO_STRUCT(info);
270
271 switch (info->type) {
272
273 case MVAR_CLEAN:
274 case MVAR_DIRTY:
275 {
276 StgMVar *mvar = (StgMVar *)p;
277 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
278 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
279 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
280 return sizeofW(StgMVar);
281 }
282
283 case THUNK:
284 case THUNK_1_0:
285 case THUNK_0_1:
286 case THUNK_1_1:
287 case THUNK_0_2:
288 case THUNK_2_0:
289 {
290 nat i;
291 for (i = 0; i < info->layout.payload.ptrs; i++) {
292 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
293 }
294 return thunk_sizeW_fromITBL(info);
295 }
296
297 case FUN:
298 case FUN_1_0:
299 case FUN_0_1:
300 case FUN_1_1:
301 case FUN_0_2:
302 case FUN_2_0:
303 case CONSTR:
304 case CONSTR_1_0:
305 case CONSTR_0_1:
306 case CONSTR_1_1:
307 case CONSTR_0_2:
308 case CONSTR_2_0:
309 case IND_PERM:
310 case BLACKHOLE:
311 case PRIM:
312 case MUT_PRIM:
313 case MUT_VAR_CLEAN:
314 case MUT_VAR_DIRTY:
315 case CONSTR_STATIC:
316 case CONSTR_NOCAF_STATIC:
317 case THUNK_STATIC:
318 case FUN_STATIC:
319 {
320 nat i;
321 for (i = 0; i < info->layout.payload.ptrs; i++) {
322 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
323 }
324 return sizeW_fromITBL(info);
325 }
326
327 case BLOCKING_QUEUE:
328 {
329 StgBlockingQueue *bq = (StgBlockingQueue *)p;
330
331 // NO: the BH might have been updated now
332 // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
333 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));
334
335 ASSERT(get_itbl(bq->owner)->type == TSO);
336 ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
337 || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
338 ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
339 get_itbl(bq->link)->type == IND ||
340 get_itbl(bq->link)->type == BLOCKING_QUEUE);
341
342 return sizeofW(StgBlockingQueue);
343 }
344
345 case BCO: {
346 StgBCO *bco = (StgBCO *)p;
347 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
348 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
349 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
350 return bco_sizeW(bco);
351 }
352
353 case IND_STATIC: /* (1, 0) closure */
354 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
355 return sizeW_fromITBL(info);
356
357 case WEAK:
358 /* deal with these specially - the info table isn't
359 * representative of the actual layout.
360 */
361 { StgWeak *w = (StgWeak *)p;
362 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
363 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
364 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
365 if (w->link) {
366 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
367 }
368 return sizeW_fromITBL(info);
369 }
370
371 case THUNK_SELECTOR:
372 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
373 return THUNK_SELECTOR_sizeW();
374
375 case IND:
376 {
377 /* we don't expect to see any of these after GC
378 * but they might appear during execution
379 */
380 StgInd *ind = (StgInd *)p;
381 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
382 return sizeofW(StgInd);
383 }
384
385 case RET_BCO:
386 case RET_SMALL:
387 case RET_BIG:
388 case RET_DYN:
389 case UPDATE_FRAME:
390 case UNDERFLOW_FRAME:
391 case STOP_FRAME:
392 case CATCH_FRAME:
393 case ATOMICALLY_FRAME:
394 case CATCH_RETRY_FRAME:
395 case CATCH_STM_FRAME:
396 barf("checkClosure: stack frame");
397
398 case AP:
399 {
400 StgAP* ap = (StgAP *)p;
401 checkPAP (ap->fun, ap->payload, ap->n_args);
402 return ap_sizeW(ap);
403 }
404
405 case PAP:
406 {
407 StgPAP* pap = (StgPAP *)p;
408 checkPAP (pap->fun, pap->payload, pap->n_args);
409 return pap_sizeW(pap);
410 }
411
412 case AP_STACK:
413 {
414 StgAP_STACK *ap = (StgAP_STACK *)p;
415 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
416 checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
417 return ap_stack_sizeW(ap);
418 }
419
420 case ARR_WORDS:
421 return arr_words_sizeW((StgArrWords *)p);
422
423 case MUT_ARR_PTRS_CLEAN:
424 case MUT_ARR_PTRS_DIRTY:
425 case MUT_ARR_PTRS_FROZEN:
426 case MUT_ARR_PTRS_FROZEN0:
427 {
428 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
429 nat i;
430 for (i = 0; i < a->ptrs; i++) {
431 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
432 }
433 return mut_arr_ptrs_sizeW(a);
434 }
435
436 case TSO:
437 checkTSO((StgTSO *)p);
438 return sizeofW(StgTSO);
439
440 case STACK:
441 checkSTACK((StgStack*)p);
442 return stack_sizeW((StgStack*)p);
443
444 case TREC_CHUNK:
445 {
446 nat i;
447 StgTRecChunk *tc = (StgTRecChunk *)p;
448 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
449 for (i = 0; i < tc -> next_entry_idx; i ++) {
450 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
451 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
452 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
453 }
454 return sizeofW(StgTRecChunk);
455 }
456
457 default:
458 barf("checkClosure (closure type %d)", info->type);
459 }
460 }
461
462
463 /* -----------------------------------------------------------------------------
464 Check Heap Sanity
465
466 After garbage collection, the live heap is in a state where we can
467 run through and check that all the pointers point to the right
468 place. This function starts at a given position and sanity-checks
469 all the objects in the remainder of the chain.
470 -------------------------------------------------------------------------- */
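
/* For example (a sketch; checkGeneration() later in this file is the real
 * caller), sanity-checking one generation's ordinary and large objects
 * amounts to:
 *
 *     checkHeapChain(generations[g].blocks);
 *     checkLargeObjects(generations[g].large_objects);
 */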
471
472 void checkHeapChain (bdescr *bd)
473 {
474 StgPtr p;
475
476 for (; bd != NULL; bd = bd->link) {
477 if(!(bd->flags & BF_SWEPT)) {
478 p = bd->start;
479 while (p < bd->free) {
480 nat size = checkClosure((StgClosure *)p);
481 /* This is the smallest size of closure that can live in the heap */
482 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
483 p += size;
484
485 /* skip over slop */
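                /* Slop is zeroed where possible in a DEBUG build (see
                 * Updates.h), so slop words are either small values or at
                 * least do not look like info pointers; step over them one
                 * word at a time until something plausible turns up. */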
486 while (p < bd->free &&
487 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
488 }
489 }
490 }
491 }
492
493 void
494 checkHeapChunk(StgPtr start, StgPtr end)
495 {
496 StgPtr p;
497 nat size;
498
499 for (p=start; p<end; p+=size) {
500 ASSERT(LOOKS_LIKE_INFO_PTR(*p));
501 size = checkClosure((StgClosure *)p);
502 /* This is the smallest size of closure that can live in the heap. */
503 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
504 }
505 }
506
507 void
508 checkLargeObjects(bdescr *bd)
509 {
510 while (bd != NULL) {
511 if (!(bd->flags & BF_PINNED)) {
512 checkClosure((StgClosure *)bd->start);
513 }
514 bd = bd->link;
515 }
516 }
517
518 static void
519 checkSTACK (StgStack *stack)
520 {
521 StgPtr sp = stack->sp;
522 StgOffset stack_size = stack->stack_size;
523 StgPtr stack_end = stack->stack + stack_size;
524
525 ASSERT(stack->stack <= sp && sp <= stack_end);
526
527 checkStackChunk(sp, stack_end);
528 }
529
530 void
531 checkTSO(StgTSO *tso)
532 {
533 if (tso->what_next == ThreadKilled) {
534 /* The garbage collector doesn't bother following any pointers
535 * from dead threads, so don't check sanity here.
536 */
537 return;
538 }
539
540 ASSERT(tso->_link == END_TSO_QUEUE ||
541 tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
542 tso->_link->header.info == &stg_TSO_info);
543
544 if ( tso->why_blocked == BlockedOnMVar
545 || tso->why_blocked == BlockedOnBlackHole
546 || tso->why_blocked == BlockedOnMsgThrowTo
547 || tso->why_blocked == NotBlocked
548 ) {
549 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
550 }
551
552 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
553 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
554 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));
555
556 // XXX are we checking the stack twice?
557 checkSTACK(tso->stackobj);
558 }
559
560 /*
561 Check that all TSOs have been evacuated.
562 Optionally also check the sanity of the TSOs.
563 */
564 void
565 checkGlobalTSOList (rtsBool checkTSOs)
566 {
567 StgTSO *tso;
568 nat g;
569
570 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
571 for (tso=generations[g].threads; tso != END_TSO_QUEUE;
572 tso = tso->global_link) {
573 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
574 ASSERT(get_itbl(tso)->type == TSO);
575 if (checkTSOs)
576 checkTSO(tso);
577
578 // If this TSO is dirty and in an old generation, it better
579 // be on the mutable list.
580 if (tso->dirty) {
581 ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
582 tso->flags &= ~TSO_MARKED;
583 }
584
585 {
586 StgStack *stack;
587 StgUnderflowFrame *frame;
588
589 stack = tso->stackobj;
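               // Walk the chain of stack chunks: each chunk normally ends
               // in an underflow frame pointing at the next chunk; stop
               // when there is no underflow frame or the link is
               // END_TSO_QUEUE.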
590 while (1) {
591 if (stack->dirty & 1) {
592 ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
593 stack->dirty &= ~TSO_MARKED;
594 }
595 frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
596 - sizeofW(StgUnderflowFrame));
597 if (frame->info != &stg_stack_underflow_frame_info
598 || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
599 stack = frame->next_chunk;
600 }
601 }
602 }
603 }
604 }
605
606 /* -----------------------------------------------------------------------------
607 Check mutable list sanity.
608 -------------------------------------------------------------------------- */
609
610 static void
611 checkMutableList( bdescr *mut_bd, nat gen )
612 {
613 bdescr *bd;
614 StgPtr q;
615 StgClosure *p;
616
617 for (bd = mut_bd; bd != NULL; bd = bd->link) {
618 for (q = bd->start; q < bd->free; q++) {
619 p = (StgClosure *)*q;
620 ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
621 checkClosure(p);
622
623 switch (get_itbl(p)->type) {
624 case TSO:
625 ((StgTSO *)p)->flags |= TSO_MARKED;
626 break;
627 case STACK:
628 ((StgStack *)p)->dirty |= TSO_MARKED;
629 break;
630 }
631 }
632 }
633 }
634
635 static void
636 checkLocalMutableLists (nat cap_no)
637 {
638 nat g;
639 for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
640 checkMutableList(capabilities[cap_no].mut_lists[g], g);
641 }
642 }
643
644 static void
645 checkMutableLists (void)
646 {
647 nat i;
648 for (i = 0; i < n_capabilities; i++) {
649 checkLocalMutableLists(i);
650 }
651 }
652
653 /*
654 Check the static objects list.
655 */
656 void
657 checkStaticObjects ( StgClosure* static_objects )
658 {
659 StgClosure *p = static_objects;
660 StgInfoTable *info;
661
662 while (p != END_OF_STATIC_LIST) {
663 checkClosure(p);
664 info = get_itbl(p);
665 switch (info->type) {
666 case IND_STATIC:
667 {
668 StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);
669
670 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
671 ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
672 p = *IND_STATIC_LINK((StgClosure *)p);
673 break;
674 }
675
676 case THUNK_STATIC:
677 p = *THUNK_STATIC_LINK((StgClosure *)p);
678 break;
679
680 case FUN_STATIC:
681 p = *FUN_STATIC_LINK((StgClosure *)p);
682 break;
683
684 case CONSTR_STATIC:
685 p = *STATIC_LINK(info,(StgClosure *)p);
686 break;
687
688 default:
689        barf("checkStaticObjects: strange closure %p (%s)",
690 p, info_type(p));
691 }
692 }
693 }
694
695 /* Nursery sanity check */
696 void
697 checkNurserySanity (nursery *nursery)
698 {
699 bdescr *bd, *prev;
700 nat blocks = 0;
701
702 prev = NULL;
703 for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
704 ASSERT(bd->gen == g0);
705 ASSERT(bd->u.back == prev);
706 prev = bd;
707 blocks += bd->blocks;
708 }
709
710 ASSERT(blocks == nursery->n_blocks);
711 }
712
713 static void checkGeneration (generation *gen,
714 rtsBool after_major_gc USED_IF_THREADS)
715 {
716 nat n;
717 gen_workspace *ws;
718
719 ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
720 ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
721
722 #if defined(THREADED_RTS)
723 // heap sanity checking doesn't work with SMP, because we can't
724 // zero the slop (see Updates.h). However, we can sanity-check
725 // the heap after a major gc, because there is no slop.
726 if (!after_major_gc) return;
727 #endif
728
729 checkHeapChain(gen->blocks);
730
731 for (n = 0; n < n_capabilities; n++) {
732 ws = &gc_threads[n]->gens[gen->no];
733 checkHeapChain(ws->todo_bd);
734 checkHeapChain(ws->part_list);
735 checkHeapChain(ws->scavd_list);
736 }
737
738 checkLargeObjects(gen->large_objects);
739 }
740
741 /* Full heap sanity check. */
742 static void checkFullHeap (rtsBool after_major_gc)
743 {
744 nat g, n;
745
746 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
747 checkGeneration(&generations[g], after_major_gc);
748 }
749 for (n = 0; n < n_capabilities; n++) {
750 checkNurserySanity(&nurseries[n]);
751 }
752 }
753
754 void checkSanity (rtsBool after_gc, rtsBool major_gc)
755 {
756 checkFullHeap(after_gc && major_gc);
757
758 checkFreeListSanity();
759
760     // always check the stacks in threaded mode, because in that case
761     // checkFullHeap() above skips the heap chains except after a major GC.
762 if (after_gc) {
763 checkMutableLists();
764 checkGlobalTSOList(rtsTrue);
765 }
766 }
767
768 // If memInventory() calculates that we have a memory leak, this
769 // function will try to find the block(s) that are leaking by marking
770 // all the ones that we know about, and searching through memory to find
771 // blocks that are not marked. In the debugger this can help to give
772 // us a clue about what kind of block leaked. In the future we might
773 // annotate blocks with their allocation site to give more helpful
774 // info.
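// findMemoryLeak() is invoked from memInventory() below when the block
// counts do not add up; in a debug build it can also be called by hand
// from gdb (e.g. "call findMemoryLeak()"), assuming the static symbol is
// visible to the debugger.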
775 static void
776 findMemoryLeak (void)
777 {
778 nat g, i;
779 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
780 for (i = 0; i < n_capabilities; i++) {
781 markBlocks(capabilities[i].mut_lists[g]);
782 markBlocks(gc_threads[i]->gens[g].part_list);
783 markBlocks(gc_threads[i]->gens[g].scavd_list);
784 markBlocks(gc_threads[i]->gens[g].todo_bd);
785 }
786 markBlocks(generations[g].blocks);
787 markBlocks(generations[g].large_objects);
788 }
789
790 for (i = 0; i < n_capabilities; i++) {
791 markBlocks(nurseries[i].blocks);
792 markBlocks(capabilities[i].pinned_object_block);
793 }
794
795 #ifdef PROFILING
796 // TODO:
797 // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
798 // markRetainerBlocks();
799 // }
800 #endif
801
802 // count the blocks allocated by the arena allocator
803 // TODO:
804 // markArenaBlocks();
805
806 // count the blocks containing executable memory
807 markBlocks(exec_block);
808
809 reportUnmarkedBlocks();
810 }
811
812 void
813 checkRunQueue(Capability *cap)
814 {
815 StgTSO *prev, *tso;
816 prev = END_TSO_QUEUE;
817 for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
818 prev = tso, tso = tso->_link) {
819 ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
820 ASSERT(tso->block_info.prev == prev);
821 }
822 ASSERT(cap->run_queue_tl == prev);
823 }
824
825 /* -----------------------------------------------------------------------------
826 Memory leak detection
827
828 memInventory() checks for memory leaks by counting up all the
829 blocks we know about and comparing that to the number of blocks
830 allegedly floating around in the system.
831 -------------------------------------------------------------------------- */
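
/* Concretely, the test at the end of memInventory() is (in blocks):
 *
 *     live_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK
 *
 * i.e. everything we can account for, plus the free list, should cover
 * every block of every megablock the block allocator has mapped.
 */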
832
833 // Useful for finding partially full blocks in gdb
834 void findSlop(bdescr *bd);
835 void findSlop(bdescr *bd)
836 {
837 lnat slop;
838
839 for (; bd != NULL; bd = bd->link) {
840 slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
841 if (slop > (1024/sizeof(W_))) {
842 debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
843 bd->start, bd, slop / (1024/sizeof(W_)));
844 }
845 }
846 }
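
/* A sketch of typical gdb usage, matching the comment above (the choice
 * of generation 0 is just an example):
 *
 *     (gdb) call findSlop(generations[0].blocks)
 *
 * which reports any block in that chain carrying more than 1KB of slop.
 */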
847
848 static lnat
849 genBlocks (generation *gen)
850 {
851 ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
852 ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
853 return gen->n_blocks + gen->n_old_blocks +
854 countAllocdBlocks(gen->large_objects);
855 }
856
857 void
858 memInventory (rtsBool show)
859 {
860 nat g, i;
861 lnat gen_blocks[RtsFlags.GcFlags.generations];
862 lnat nursery_blocks, retainer_blocks,
863 arena_blocks, exec_blocks;
864 lnat live_blocks = 0, free_blocks = 0;
865 rtsBool leak;
866
867   // count the blocks we currently have
868
869 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
870 gen_blocks[g] = 0;
871 for (i = 0; i < n_capabilities; i++) {
872 gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
873 gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
874 gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
875 gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
876 }
877 gen_blocks[g] += genBlocks(&generations[g]);
878 }
879
880 nursery_blocks = 0;
881 for (i = 0; i < n_capabilities; i++) {
882 ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
883 nursery_blocks += nurseries[i].n_blocks;
884 if (capabilities[i].pinned_object_block != NULL) {
885 nursery_blocks += capabilities[i].pinned_object_block->blocks;
886 }
887 }
888
889 retainer_blocks = 0;
890 #ifdef PROFILING
891 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
892 retainer_blocks = retainerStackBlocks();
893 }
894 #endif
895
896 // count the blocks allocated by the arena allocator
897 arena_blocks = arenaBlocks();
898
899 // count the blocks containing executable memory
900 exec_blocks = countAllocdBlocks(exec_block);
901
902 /* count the blocks on the free list */
903 free_blocks = countFreeList();
904
905 live_blocks = 0;
906 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
907 live_blocks += gen_blocks[g];
908 }
909 live_blocks += nursery_blocks +
910                  retainer_blocks + arena_blocks + exec_blocks;
911
912 #define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
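// With the usual 4Kbyte block size (BLOCK_SIZE_W * sizeof(W_) == 4096),
// MB(n) is n * 4096 / (1024*1024), i.e. 256 blocks per megabyte; the
// sizeof(W_) factors cancel, so the conversion is word-size independent.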
913
914 leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
915
916 if (show || leak)
917 {
918 if (leak) {
919 debugBelch("Memory leak detected:\n");
920 } else {
921 debugBelch("Memory inventory:\n");
922 }
923 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
924 debugBelch(" gen %d blocks : %5lu blocks (%lu MB)\n", g,
925 gen_blocks[g], MB(gen_blocks[g]));
926 }
927 debugBelch(" nursery : %5lu blocks (%lu MB)\n",
928 nursery_blocks, MB(nursery_blocks));
929 debugBelch(" retainer : %5lu blocks (%lu MB)\n",
930 retainer_blocks, MB(retainer_blocks));
931 debugBelch(" arena blocks : %5lu blocks (%lu MB)\n",
932 arena_blocks, MB(arena_blocks));
933 debugBelch(" exec : %5lu blocks (%lu MB)\n",
934 exec_blocks, MB(exec_blocks));
935 debugBelch(" free : %5lu blocks (%lu MB)\n",
936 free_blocks, MB(free_blocks));
937 debugBelch(" total : %5lu blocks (%lu MB)\n",
938 live_blocks + free_blocks, MB(live_blocks+free_blocks));
939 if (leak) {
940 debugBelch("\n in system : %5lu blocks (%lu MB)\n",
941 mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
942 }
943 }
944
945 if (leak) {
946 debugBelch("\n");
947 findMemoryLeak();
948 }
949 ASSERT(n_alloc_blocks == live_blocks);
950 ASSERT(!leak);
951 }
952
953
954 #endif /* DEBUG */