a fix for checkTSO(): the TSO could be a WHITEHOLE
rts/sm/Sanity.c
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "GCThread.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void  checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void  checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void  checkClosureShallow ( StgClosure * );
static void  checkSTACK          ( StgStack *stack );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

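/* In GHC's layout bitmaps a clear (0) bit marks a pointer word and a set (1)
 * bit marks a non-pointer word, so the loops below only follow slots whose
 * bit is 0.
 */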
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for(i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp=0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that it looks like a valid closure, without checking its payload.
 * Used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// check an individual stack object
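// Returns the frame size in words, so the caller can step to the next frame.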
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through: the rest of the frame is a small bitmap */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

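/* Check the argument payload of a PAP or AP against the bitmap dictated by
 * the function's info table, and check that the pointer tag on the function
 * is consistent with its arity (tag 0 if the arity does not fit in the tag
 * bits).
 */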
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}

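// Check a single closure of any type and return its size in words.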
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

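/* Blocks marked BF_SWEPT were swept in place rather than copied, so they may
 * contain free gaps that are not valid closures; checkHeapChain skips those
 * blocks below.
 */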
void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
        if(!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                nat size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}

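// Check all closures in the contiguous heap region [start, end).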
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}

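/* Sanity-check a TSO.  Note that a TSO's info pointer may legitimately be
 * stg_WHITEHOLE_info while another party has the TSO locked (e.g. STM's
 * lockTSO()), which is why the assertion below accepts WHITEHOLE.
 */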
void
checkTSO(StgTSO *tso)
{
    StgTSO *next;
    const StgInfoTable *info;

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    next = tso->_link;
    info = (const StgInfoTable*) tso->header.info;

    ASSERT(next == END_TSO_QUEUE ||
           info == &stg_MVAR_TSO_QUEUE_info ||
           info == &stg_TSO_info ||
           info == &stg_WHITEHOLE_info); // happens due to STM doing lockTSO()

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso=generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it had better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }

            {
                StgStack *stack;
                StgUnderflowFrame *frame;

                stack = tso->stackobj;
                while (1) {
                    if (stack->dirty & 1) {
                        ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                        stack->dirty &= ~TSO_MARKED;
                    }
                    frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                  - sizeofW(StgUnderflowFrame));
                    if (frame->info != &stg_stack_underflow_frame_info
                        || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                    stack = frame->next_chunk;
                }
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

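/* Walk a generation's mutable list, check each object, and mark dirty TSOs
 * and STACKs with TSO_MARKED so that checkGlobalTSOList() can verify that
 * every dirty thread in an old generation really is on a mutable list.
 */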
static void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            checkClosure(p);

            switch (get_itbl(p)->type) {
            case TSO:
                ((StgTSO *)p)->flags |= TSO_MARKED;
                break;
            case STACK:
                ((StgStack *)p)->dirty |= TSO_MARKED;
                break;
            }
        }
    }
}

static void
checkLocalMutableLists (nat cap_no)
{
    nat g;
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(capabilities[cap_no].mut_lists[g], g);
    }
}

static void
checkMutableLists (void)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        checkLocalMutableLists(i);
    }
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->gen == g0);
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}

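/* Check one generation: the block counts must match the bookkeeping, and
 * (when it is safe to walk the heap) every block chain and per-capability
 * GC workspace list is scanned closure by closure.
 */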
static void checkGeneration (generation *gen,
                             rtsBool after_major_gc USED_IF_THREADS)
{
    nat n;
    gen_workspace *ws;

    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).  However, we can sanity-check
    // the heap after a major gc, because there is no slop.
    if (!after_major_gc) return;
#endif

    checkHeapChain(gen->blocks);

    for (n = 0; n < n_capabilities; n++) {
        ws = &gc_threads[n]->gens[gen->no];
        checkHeapChain(ws->todo_bd);
        checkHeapChain(ws->part_list);
        checkHeapChain(ws->scavd_list);
    }

    checkLargeObjects(gen->large_objects);
}

/* Full heap sanity check. */
static void checkFullHeap (rtsBool after_major_gc)
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkGeneration(&generations[g], after_major_gc);
    }
    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }
}

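/* Top-level sanity check: the whole heap, the block allocator's free list,
 * and (after a GC) the mutable lists and the global TSO list.
 */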
void checkSanity (rtsBool after_gc, rtsBool major_gc)
{
    checkFullHeap(after_gc && major_gc);

    checkFreeListSanity();

    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    if (after_gc) {
        checkMutableLists();
        checkGlobalTSOList(rtsTrue);
    }
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and search through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    nat g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
            markBlocks(gc_threads[i]->gens[g].part_list);
            markBlocks(gc_threads[i]->gens[g].scavd_list);
            markBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
        markBlocks(capabilities[i].pinned_object_block);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

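/* Check the doubly-linked run queue of a capability: each TSO's
 * block_info.prev must point at the previous queue element, and the queue's
 * tail pointer must match the last element reachable from the head.
 */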
void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    W_ slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %" FMT_SizeT "KB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}

static W_
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}

void
memInventory (rtsBool show)
{
    nat g, i;
    W_ gen_blocks[RtsFlags.GcFlags.generations];
    W_ nursery_blocks, retainer_blocks,
        arena_blocks, exec_blocks;
    W_ live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
        if (capabilities[i].pinned_object_block != NULL) {
            nursery_blocks += capabilities[i].pinned_object_block->blocks;
        }
        nursery_blocks += countBlocks(capabilities[i].pinned_object_blocks);
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
                   retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
                       mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */