/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "GCThread.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void  checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void  checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void  checkClosureShallow ( StgClosure * );
static void  checkSTACK          ( StgStack *stack );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

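// Check 'size' payload words against a small bitmap: a clear bit means the
// corresponding word is a heap pointer and gets sanity-checked, a set bit
// means a non-pointer word and is skipped.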
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for(i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

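// As checkSmallBitmap, but for bitmaps wider than a word: the bits are
// spread across the words of large_bitmap->bitmap, BITS_IN(W_) per word.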
static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp=0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * check that it looks like a valid closure - without checking its payload
 * used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
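        /* fall through to the small-bitmap case */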
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

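// Check the argument payload of a PAP or AP against the function's argument
// bitmap, and check that the closure's pointer tag agrees with the
// function's arity (small arities are encoded in the tag bits).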
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}


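// Check the sanity of a single closure and return its size in words, so
// that callers (e.g. checkHeapChain) can step through a block closure by
// closure.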
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case TVAR:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrBytes *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

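// Walk a chain of blocks, checking every closure in each block.  Blocks
// flagged BF_SWEPT are skipped: after sweeping they may contain free gaps
// that do not parse as closures.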
void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
        if(!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                nat size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}

void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}

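// Basic sanity checks on a TSO: its link field, the closure it is blocked
// on (where the blocking reason implies there is one), its blocking queue,
// and its stack object.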
void
checkTSO(StgTSO *tso)
{
    StgTSO *next;
    const StgInfoTable *info;

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    next = tso->_link;
    info = (const StgInfoTable*) tso->_link->header.info;

    ASSERT(next == END_TSO_QUEUE ||
           info == &stg_MVAR_TSO_QUEUE_info ||
           info == &stg_TSO_info ||
           info == &stg_WHITEHOLE_info); // happens due to STM doing lockTSO()

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnMVarRead
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso=generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }

            {
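                // Walk this TSO's chain of stack chunks via their
                // underflow frames, applying the same dirty-bit check
                // to each chunk.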
                StgStack *stack;
                StgUnderflowFrame *frame;

                stack = tso->stackobj;
                while (1) {
                    if (stack->dirty & 1) {
                        ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                        stack->dirty &= ~TSO_MARKED;
                    }
                    frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                  - sizeofW(StgUnderflowFrame));
                    if (frame->info != &stg_stack_underflow_frame_info
                        || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                    stack = frame->next_chunk;
                }
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

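// Check each object on the mutable list; additionally mark TSOs and STACKs
// with TSO_MARKED so that checkGlobalTSOList can verify that every dirty
// TSO or STACK in an old generation was on a mutable list.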
static void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            checkClosure(p);

            switch (get_itbl(p)->type) {
            case TSO:
                ((StgTSO *)p)->flags |= TSO_MARKED;
                break;
            case STACK:
                ((StgStack *)p)->dirty |= TSO_MARKED;
                break;
            }
        }
    }
}

static void
checkLocalMutableLists (nat cap_no)
{
    nat g;
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(capabilities[cap_no]->mut_lists[g], g);
    }
}

static void
checkMutableLists (void)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        checkLocalMutableLists(i);
    }
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_OBJECT_LIST) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}


/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->gen == g0);
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}

static void checkGeneration (generation *gen,
                             rtsBool after_major_gc USED_IF_THREADS)
{
    nat n;
    gen_workspace *ws;

    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).  However, we can sanity-check
    // the heap after a major gc, because there is no slop.
    if (!after_major_gc) return;
#endif

    checkHeapChain(gen->blocks);

    for (n = 0; n < n_capabilities; n++) {
        ws = &gc_threads[n]->gens[gen->no];
        checkHeapChain(ws->todo_bd);
        checkHeapChain(ws->part_list);
        checkHeapChain(ws->scavd_list);
    }

    checkLargeObjects(gen->large_objects);
}

/* Full heap sanity check. */
static void checkFullHeap (rtsBool after_major_gc)
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkGeneration(&generations[g], after_major_gc);
    }
    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }
}

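// Top-level sanity check.  In a DEBUG RTS this is normally reached via
// IF_DEBUG(sanity, ...), i.e. when the program is run with +RTS -DS.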
void checkSanity (rtsBool after_gc, rtsBool major_gc)
{
    checkFullHeap(after_gc && major_gc);

    checkFreeListSanity();

    // always check the stacks in threaded mode, because heap checking is
    // skipped in that case except after a major GC (see checkGeneration()).
    if (after_gc) {
        checkMutableLists();
        checkGlobalTSOList(rtsTrue);
    }
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and search through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    nat g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i]->mut_lists[g]);
            markBlocks(gc_threads[i]->gens[g].part_list);
            markBlocks(gc_threads[i]->gens[g].scavd_list);
            markBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_nurseries; i++) {
        markBlocks(nurseries[i].blocks);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(gc_threads[i]->free_blocks);
        markBlocks(capabilities[i]->pinned_object_block);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

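// Check that the doubly-linked run queue is consistent: each TSO's
// block_info.prev points at its predecessor, and the tail pointer matches
// the last element reached from the head.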
void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
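// (for example, assuming a DEBUG RTS: "call findSlop(generations[0].blocks)")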
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    W_ slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %" FMT_SizeT "KB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}

static W_
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
        countAllocdBlocks(gen->large_objects);
}

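// Count every block we know about and check the accounting invariant:
// live_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK,
// i.e. every block in every allocated megablock is accounted for exactly
// once.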
void
memInventory (rtsBool show)
{
    nat g, i;
    W_ gen_blocks[RtsFlags.GcFlags.generations];
    W_ nursery_blocks, retainer_blocks,
        arena_blocks, exec_blocks, gc_free_blocks = 0;
    W_ live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i]->mut_lists[g]);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_nurseries; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }
    for (i = 0; i < n_capabilities; i++) {
        gc_free_blocks += countBlocks(gc_threads[i]->free_blocks);
        if (capabilities[i]->pinned_object_block != NULL) {
            nursery_blocks += capabilities[i]->pinned_object_block->blocks;
        }
        nursery_blocks += countBlocks(capabilities[i]->pinned_object_blocks);
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
        retainer_blocks + arena_blocks + exec_blocks + gc_free_blocks;

#define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  GC free pool : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   gc_free_blocks, MB(gc_free_blocks));
        debugBelch("  free         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
                       (W_)(mblocks_allocated * BLOCKS_PER_MBLOCK), mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */