/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "GCThread.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );
static void checkSTACK          ( StgStack *stack );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

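/*
 * Walk the first 'size' words of 'payload' under the control of the
 * given bitmap.  A 0 bit means the corresponding word is a pointer and
 * gets a shallow closure check; a 1 bit marks a non-pointer word,
 * which is skipped.
 */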
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * check that it looks like a valid closure - without checking its payload
 * used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
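        /* fall through to the small-bitmap cases below */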
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",
             c, info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

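    /*
     * Pointer tagging: when a function's arity fits in the pointer tag
     * bits, the tag on a pointer to it encodes that arity.  So the tag
     * on 'tagged_fun' must be either zero (arity too large to encode)
     * or equal to the arity recorded in the info table.
     */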
    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
                                        : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}

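/*
 * Check a single closure and return its size in words.  Stack frames
 * are rejected here (they are checked by checkStackFrame), and finding
 * a forwarding pointer is treated as a hard error.
 */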
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
      {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
      }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
      {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
      }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case TVAR:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
      {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
      }

    case BLOCKING_QUEUE:
      {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
      }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
      { StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
      }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
      {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
      }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
      {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
      }

    case PAP:
      {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
      }

    case AP_STACK:
      {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
      }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
      {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
      }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
      {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
      }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
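        // Only scan a block linearly if it hasn't been swept: a swept
        // block can contain gaps of free memory between live objects.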
        if (!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                nat size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}

void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}

void
checkTSO(StgTSO *tso)
{
    StgTSO *next;
    const StgInfoTable *info;

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    next = tso->_link;
    info = (const StgInfoTable*) tso->_link->header.info;

    ASSERT(next == END_TSO_QUEUE ||
           info == &stg_MVAR_TSO_QUEUE_info ||
           info == &stg_TSO_info ||
           info == &stg_WHITEHOLE_info); // happens due to STM doing lockTSO()

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnMVarRead
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso = generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }

            {
                StgStack *stack;
                StgUnderflowFrame *frame;

                stack = tso->stackobj;
                while (1) {
                    if (stack->dirty & 1) {
                        ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                        stack->dirty &= ~TSO_MARKED;
                    }
                    frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                  - sizeofW(StgUnderflowFrame));
                    if (frame->info != &stg_stack_underflow_frame_info
                        || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                    stack = frame->next_chunk;
                }
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

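/*
 * Walk a mutable list, checking every object on it and making sure it
 * lives in the expected generation.  Dirty TSOs and STACKs found here
 * are tagged with TSO_MARKED; checkGlobalTSOList (called afterwards
 * from checkSanity) asserts that every dirty TSO/STACK in an old
 * generation carries the mark - i.e. really was on a mutable list -
 * and then clears it again.
 */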
static void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            checkClosure(p);

            switch (get_itbl(p)->type) {
            case TSO:
                ((StgTSO *)p)->flags |= TSO_MARKED;
                break;
            case STACK:
                ((StgStack *)p)->dirty |= TSO_MARKED;
                break;
            }
        }
    }
}

static void
checkLocalMutableLists (nat cap_no)
{
    nat g;
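    // generation 0 is collected at every GC, so nothing needs to be
    // remembered for it; start at g = 1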
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(capabilities[cap_no]->mut_lists[g], g);
    }
}

static void
checkMutableLists (void)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        checkLocalMutableLists(i);
    }
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_OBJECT_LIST) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
          {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
          }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->gen == g0);
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}

static void checkGeneration (generation *gen,
                             rtsBool after_major_gc USED_IF_THREADS)
{
    nat n;
    gen_workspace *ws;

    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).  However, we can sanity-check
    // the heap after a major gc, because there is no slop.
    if (!after_major_gc) return;
#endif

    checkHeapChain(gen->blocks);

    for (n = 0; n < n_capabilities; n++) {
        ws = &gc_threads[n]->gens[gen->no];
        checkHeapChain(ws->todo_bd);
        checkHeapChain(ws->part_list);
        checkHeapChain(ws->scavd_list);
    }

    checkLargeObjects(gen->large_objects);
}

/* Full heap sanity check. */
static void checkFullHeap (rtsBool after_major_gc)
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkGeneration(&generations[g], after_major_gc);
    }
    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }
}

void checkSanity (rtsBool after_gc, rtsBool major_gc)
{
    checkFullHeap(after_gc && major_gc);

    checkFreeListSanity();

    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    if (after_gc) {
        checkMutableLists();
        checkGlobalTSOList(rtsTrue);
    }
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and search through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    nat g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i]->mut_lists[g]);
            markBlocks(gc_threads[i]->gens[g].part_list);
            markBlocks(gc_threads[i]->gens[g].scavd_list);
            markBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_nurseries; i++) {
        markBlocks(nurseries[i].blocks);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(capabilities[i]->pinned_object_block);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

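/*
 * Check that a capability's run queue is a well-formed doubly-linked
 * list: each TSO's block_info.prev points at its predecessor, and the
 * tail pointer ends up at the last element.
 */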
void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    W_ slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %" FMT_SizeT "KB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}

static W_
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}

void
memInventory (rtsBool show)
{
    nat g, i;
    W_ gen_blocks[RtsFlags.GcFlags.generations];
    W_ nursery_blocks, retainer_blocks,
       arena_blocks, exec_blocks;
    W_ live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i]->mut_lists[g]);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_nurseries; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }
    for (i = 0; i < n_capabilities; i++) {
        if (capabilities[i]->pinned_object_block != NULL) {
            nursery_blocks += capabilities[i]->pinned_object_block->blocks;
        }
        nursery_blocks += countBlocks(capabilities[i]->pinned_object_blocks);
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
                   retainer_blocks + arena_blocks + exec_blocks;

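// MB(n): convert a count of n blocks into megabytes (BLOCK_SIZE_W words
// per block, sizeof(W_) bytes per word)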
#define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

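    // Everything we know about, plus the free list, should account for
    // exactly the blocks in all allocated megablocks; anything else is a leak.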
    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
                       (W_)(mblocks_allocated * BLOCKS_PER_MBLOCK), mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */