/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "GCThread.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"
#include "CNF.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void  checkSmallBitmap    ( StgPtr payload, StgWord bitmap, uint32_t );
static void  checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, uint32_t );
static void  checkClosureShallow ( const StgClosure * );
static void  checkSTACK          ( StgStack *stack );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, uint32_t size )
{
    uint32_t i;

    for(i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, uint32_t size )
{
    StgWord bmp;
    uint32_t i, j;

    i = 0;
    for (bmp=0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that p looks like a valid closure, without checking its payload.
 * Used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( const StgClosure* p )
{
    const StgClosure *q;

    q = UNTAG_CONST_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
}

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    uint32_t size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through: update frames use a small bitmap like the cases below */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
      // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        uint32_t size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        const StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

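/*
 * Check the payload of a PAP or AP against the argument bitmap of its
 * function, and check that the pointer tag on the function matches its
 * arity (a tag of 0 is used when the arity is larger than TAG_MASK).
 */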
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    const StgClosure *fun;
    const StgFunInfoTable *fun_info;

    fun = UNTAG_CONST_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}


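/*
 * Check a single closure: verify that its pointer fields look like
 * closure pointers, and return its size in words so that callers can
 * walk a heap block closure by closure.
 */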
StgOffset
checkClosure( const StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CONST_CLOSURE(p);

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        uint32_t i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_NOCAF:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case TVAR:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        uint32_t i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
        { StgWeak *w = (StgWeak *)p;
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
          if (w->link) {
              ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
          }
          return sizeW_fromITBL(info);
        }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrBytes *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        uint32_t i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
    {
        uint32_t i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
        if (!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                uint32_t size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}

void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    uint32_t size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

static void
checkCompactObjects(bdescr *bd)
{
    // Compact objects are similar to large objects, but they have a
    // StgCompactNFDataBlock at the beginning, before the actual closure.

    for ( ; bd != NULL; bd = bd->link) {
        StgCompactNFDataBlock *block, *last;
        StgCompactNFData *str;
        StgWord totalW;

        ASSERT (bd->flags & BF_COMPACT);

        block = (StgCompactNFDataBlock*)bd->start;
        str = block->owner;
        ASSERT ((W_)str == (W_)block + sizeof(StgCompactNFDataBlock));

        totalW = 0;
        for ( ; block ; block = block->next) {
            last = block;
            ASSERT (block->owner == str);

            totalW += Bdescr((P_)block)->blocks * BLOCK_SIZE_W;
        }

        ASSERT (str->totalW == totalW);
        ASSERT (str->last == last);
    }
}

static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}

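/*
 * Check a single TSO: its link field, the closure it is blocked on
 * (for the blocking reasons that carry one), its blocking queue and
 * blocked-exceptions queue, and its stack object.
 */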
void
checkTSO(StgTSO *tso)
{
    StgTSO *next;
    const StgInfoTable *info;

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    next = tso->_link;
    info = (const StgInfoTable*) tso->_link->header.info;

    ASSERT(next == END_TSO_QUEUE ||
           info == &stg_MVAR_TSO_QUEUE_info ||
           info == &stg_TSO_info ||
           info == &stg_WHITEHOLE_info); // happens due to STM doing lockTSO()

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnMVarRead
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    uint32_t g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso=generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }

            {
                StgStack *stack;
                StgUnderflowFrame *frame;

                stack = tso->stackobj;
                while (1) {
                    if (stack->dirty & 1) {
                        ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                        stack->dirty &= ~TSO_MARKED;
                    }
                    frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                  - sizeofW(StgUnderflowFrame));
                    if (frame->info != &stg_stack_underflow_frame_info
                        || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                    stack = frame->next_chunk;
                }
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

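// Check one capability's mutable list for generation 'gen': every entry
// must live in that generation (if heap-allocated) and be a sane closure.
// TSOs and STACKs found here are marked with TSO_MARKED so that
// checkGlobalTSOList can verify that dirty objects really were on a
// mutable list (it clears the mark again).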
static void
checkMutableList( bdescr *mut_bd, uint32_t gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            checkClosure(p);

            switch (get_itbl(p)->type) {
            case TSO:
                ((StgTSO *)p)->flags |= TSO_MARKED;
                break;
            case STACK:
                ((StgStack *)p)->dirty |= TSO_MARKED;
                break;
            }
        }
    }
}

static void
checkLocalMutableLists (uint32_t cap_no)
{
    uint32_t g;
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(capabilities[cap_no]->mut_lists[g], g);
    }
}

static void
checkMutableLists (void)
{
    uint32_t i;
    for (i = 0; i < n_capabilities; i++) {
        checkLocalMutableLists(i);
    }
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    const StgInfoTable *info;

    while (p != END_OF_STATIC_OBJECT_LIST) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            const StgClosure *indirectee;

            indirectee = UNTAG_CONST_CLOSURE(((StgIndStatic *)p)->indirectee);
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR:
        case CONSTR_NOCAF:
        case CONSTR_1_0:
        case CONSTR_2_0:
        case CONSTR_1_1:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    uint32_t blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->gen == g0);
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}

static void checkGeneration (generation *gen,
                             rtsBool after_major_gc USED_IF_THREADS)
{
    uint32_t n;
    gen_workspace *ws;

    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).  However, we can sanity-check
    // the heap after a major gc, because there is no slop.
    if (!after_major_gc) return;
#endif

    checkHeapChain(gen->blocks);

    for (n = 0; n < n_capabilities; n++) {
        ws = &gc_threads[n]->gens[gen->no];
        checkHeapChain(ws->todo_bd);
        checkHeapChain(ws->part_list);
        checkHeapChain(ws->scavd_list);
    }

    checkLargeObjects(gen->large_objects);
    checkCompactObjects(gen->compact_objects);
}

/* Full heap sanity check. */
static void checkFullHeap (rtsBool after_major_gc)
{
    uint32_t g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkGeneration(&generations[g], after_major_gc);
    }
    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }
}

void checkSanity (rtsBool after_gc, rtsBool major_gc)
{
    checkFullHeap(after_gc && major_gc);

    checkFreeListSanity();

    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    if (after_gc) {
        checkMutableLists();
        checkGlobalTSOList(rtsTrue);
    }
}

static void
markCompactBlocks(bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        compactMarkKnown(((StgCompactNFDataBlock*)bd->start)->owner);
    }
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and searching through memory for
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    uint32_t g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i]->mut_lists[g]);
            markBlocks(gc_threads[i]->gens[g].part_list);
            markBlocks(gc_threads[i]->gens[g].scavd_list);
            markBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
        markCompactBlocks(generations[g].compact_objects);
    }

    for (i = 0; i < n_nurseries; i++) {
        markBlocks(nurseries[i].blocks);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(gc_threads[i]->free_blocks);
        markBlocks(capabilities[i]->pinned_object_block);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

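// Check that a capability's run queue is a well-formed doubly-linked
// list: each TSO's block_info.prev points at its predecessor, the tail
// pointer matches the last element, and n_run_queue matches the length.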
void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    uint32_t n;
    for (n = 0, tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link, n++) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
    ASSERT(cap->n_run_queue == n);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    W_ slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %" FMT_Word "KB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}

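// Count the blocks attributed to a generation (ordinary, old, large and
// compact blocks), after checking that the cached block counts agree
// with what is actually on the lists.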
static W_
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    ASSERT(countCompactBlocks(gen->compact_objects) == gen->n_compact_blocks);
    ASSERT(countCompactBlocks(gen->compact_blocks_in_import) == gen->n_compact_blocks_in_import);
    return gen->n_blocks + gen->n_old_blocks +
        countAllocdBlocks(gen->large_objects) +
        gen->n_compact_blocks + gen->n_compact_blocks_in_import;
}

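// Count up every block we know about and compare against what the block
// allocator has handed out; the two should satisfy
//   live_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK
// or else blocks have leaked (or been double-counted) somewhere.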
void
memInventory (rtsBool show)
{
    uint32_t g, i;
    W_ gen_blocks[RtsFlags.GcFlags.generations];
    W_ nursery_blocks, retainer_blocks,
        arena_blocks, exec_blocks, gc_free_blocks = 0;
    W_ live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i]->mut_lists[g]);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_nurseries; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }
    for (i = 0; i < n_capabilities; i++) {
        gc_free_blocks += countBlocks(gc_threads[i]->free_blocks);
        if (capabilities[i]->pinned_object_block != NULL) {
            nursery_blocks += capabilities[i]->pinned_object_block->blocks;
        }
        nursery_blocks += countBlocks(capabilities[i]->pinned_object_blocks);
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
        retainer_blocks + arena_blocks + exec_blocks + gc_free_blocks;

#define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  GC free pool : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   gc_free_blocks, MB(gc_free_blocks));
        debugBelch("  free         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
                       (W_)(mblocks_allocated * BLOCKS_PER_MBLOCK), mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */