/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "GCThread.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void  checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void  checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void  checkClosureShallow ( StgClosure * );
static void  checkSTACK          ( StgStack *stack );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for(i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}
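
/* A worked example of the bitmap convention used above and in
 * checkLargeBitmap below (informal sketch): bit i describes payload[i],
 * and a 0 bit means "this word is a pointer".  So with size == 3 and
 * bitmap == 0b010, payload[0] and payload[2] are checked as closure
 * pointers, while payload[1] is a non-pointer word (e.g. an unboxed
 * Int#) and is skipped.
 */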

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp=0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that it looks like a valid closure, without checking its payload;
 * used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}
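
/* A sketch of the cycle the shallow check breaks (illustrative only):
 * checkClosure(PAP) -> checkPAP -> (payload contains an AP_STACK) ->
 * checkStackChunk -> checkStackFrame -> check{Small,Large}Bitmap ->
 * checkClosureShallow.  If the bitmap walkers called the full
 * checkClosure instead, mutually-referencing PAPs and stack chunks
 * could make the chain recurse indefinitely.
 */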

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
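        /* fall through to the small-bitmap cases below */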
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
      // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}
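
/* Layout sketch of a RET_SMALL frame as walked above (illustrative):
 *
 *     c[0]          info pointer (the return address)
 *     c[1..size]    payload words, described by the info table's small
 *                   bitmap (0 bit => pointer)
 *
 * hence "return 1 + size".  A RET_BCO frame carries one extra word for
 * the BCO itself before the payload, hence "return 2 + size" there.
 */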

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}
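
/* Background for the tag assertion above (a sketch of the pointer-tagging
 * scheme, not a spec): the low TAG_MASK bits of a function pointer encode
 * its arity when the arity fits (up to 3 on 32-bit platforms, up to 7 on
 * 64-bit); larger arities leave the pointer untagged.  E.g. a reference
 * to a known arity-3 function on 64-bit looks like (ptr | 3).
 */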


StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case TVAR:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
        { StgWeak *w = (StgWeak *)p;
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
          if (w->link) {
              ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
          }
          return sizeW_fromITBL(info);
        }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
        if(!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                nat size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}
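
/* The slop test above is a heuristic (a sketch of the reasoning): in a
 * DEBUG, non-threaded RTS the slop left behind by updates is zeroed (see
 * Updates.h), so a word that is a small integer (< 0x1000, not a
 * plausible address) or that fails LOOKS_LIKE_INFO_PTR cannot be the
 * header of a closure; scanning simply resumes at the first word that
 * again looks like an info pointer.
 */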

void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}

void
checkTSO(StgTSO *tso)
{
    StgTSO *next;
    const StgInfoTable *info;

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    next = tso->_link;
    info = (const StgInfoTable*) tso->_link->header.info;

    ASSERT(next == END_TSO_QUEUE ||
           info == &stg_MVAR_TSO_QUEUE_info ||
           info == &stg_TSO_info ||
           info == &stg_WHITEHOLE_info); // happens due to STM doing lockTSO()

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso=generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }

            {
                StgStack *stack;
                StgUnderflowFrame *frame;

                stack = tso->stackobj;
                while (1) {
                    if (stack->dirty & 1) {
                        ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                        stack->dirty &= ~TSO_MARKED;
                    }
                    frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                  - sizeofW(StgUnderflowFrame));
                    if (frame->info != &stg_stack_underflow_frame_info
                        || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                    stack = frame->next_chunk;
                }
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

static void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            checkClosure(p);

            switch (get_itbl(p)->type) {
            case TSO:
                ((StgTSO *)p)->flags |= TSO_MARKED;
                break;
            case STACK:
                ((StgStack *)p)->dirty |= TSO_MARKED;
                break;
            }
        }
    }
}
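
/* How the dirty-object check fits together (informal summary of this
 * file's own protocol): checkMutableLists() runs first, setting
 * TSO_MARKED on every TSO and STACK it finds on some capability's
 * mutable list; checkGlobalTSOList() above then asserts that any dirty
 * TSO or STACK living outside generation 0 carries that mark (i.e. it
 * really was on a mutable list), clearing the bit as it goes.
 */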

static void
checkLocalMutableLists (nat cap_no)
{
    nat g;
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(capabilities[cap_no].mut_lists[g], g);
    }
}

static void
checkMutableLists (void)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        checkLocalMutableLists(i);
    }
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->gen == g0);
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}

static void checkGeneration (generation *gen,
                             rtsBool after_major_gc USED_IF_THREADS)
{
    nat n;
    gen_workspace *ws;

    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).  However, we can sanity-check
    // the heap after a major gc, because there is no slop.
    if (!after_major_gc) return;
#endif

    checkHeapChain(gen->blocks);

    for (n = 0; n < n_capabilities; n++) {
        ws = &gc_threads[n]->gens[gen->no];
        checkHeapChain(ws->todo_bd);
        checkHeapChain(ws->part_list);
        checkHeapChain(ws->scavd_list);
    }

    checkLargeObjects(gen->large_objects);
}

/* Full heap sanity check. */
static void checkFullHeap (rtsBool after_major_gc)
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkGeneration(&generations[g], after_major_gc);
    }
    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }
}

void checkSanity (rtsBool after_gc, rtsBool major_gc)
{
    checkFullHeap(after_gc && major_gc);

    checkFreeListSanity();

    // always check the stacks in threaded mode, because checkGeneration()
    // above skips the heap (and hence the stacks) except after a major GC.
    if (after_gc) {
        checkMutableLists();
        checkGlobalTSOList(rtsTrue);
    }
}
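
/* A usage sketch (hypothetical call shape; the real call sites live in
 * the garbage collector, guarded by the sanity debug flag, which a DEBUG
 * RTS enables with +RTS -DS):
 *
 *     IF_DEBUG(sanity, checkSanity(rtsTrue, major_gc));   // after a GC
 */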

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and searching through memory to
// find blocks that are not marked.  In the debugger this can help to
// give us a clue about what kind of block leaked.  In the future we
// might annotate blocks with their allocation site to give more
// helpful info.
static void
findMemoryLeak (void)
{
    nat g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
            markBlocks(gc_threads[i]->gens[g].part_list);
            markBlocks(gc_threads[i]->gens[g].scavd_list);
            markBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
        markBlocks(capabilities[i].pinned_object_block);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}
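
/* Invariant being checked above (sketch): the run queue is doubly linked,
 * forwards via _link and backwards via block_info.prev, e.g.
 *
 *     run_queue_hd -> A <-> B <-> C <- run_queue_tl
 *
 * with A->block_info.prev == END_TSO_QUEUE and C->_link == END_TSO_QUEUE.
 */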

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    W_ slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %" FMT_SizeT "KB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}
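
/* A typical use from gdb (hypothetical session; the addresses and the
 * amount of slop shown are made up, the symbol names are this file's):
 *
 *     (gdb) call findSlop(generations[1].blocks)
 *     block at 0x4200000 (bdescr 0x41ffe00) has 12KB slop
 */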

static W_
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
        countAllocdBlocks(gen->large_objects);
}

void
memInventory (rtsBool show)
{
    nat g, i;
    W_ gen_blocks[RtsFlags.GcFlags.generations];
    W_ nursery_blocks, retainer_blocks,
        arena_blocks, exec_blocks;
    W_ live_blocks = 0, free_blocks = 0;
    rtsBool leak;
    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
        if (capabilities[i].pinned_object_block != NULL) {
            nursery_blocks += capabilities[i].pinned_object_block->blocks;
        }
        nursery_blocks += countBlocks(capabilities[i].pinned_object_blocks);
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
        retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
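
/* A worked example of the conversion (assuming 4096-byte blocks and
 * 8-byte words, so BLOCK_SIZE_W == 512): a block count n is first scaled
 * to words (n * 512), then divided by the number of words per megabyte
 * (1024*1024/8 == 131072); 256 blocks therefore report as exactly 1.0 MB.
 */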

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
                       mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */