1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2006
4 *
5 * Sanity checking code for the heap and stack.
6 *
7 * Used when debugging: check that everything is reasonable.
8 *
9 * - All things that are supposed to be pointers look like pointers.
10 *
11 * - Objects in text space are marked as static closures; those
12 * in the heap are dynamic.
13 *
14 * ---------------------------------------------------------------------------*/
15
16 #include "PosixSource.h"
17 #include "Rts.h"
18
19 #if defined(DEBUG) /* whole file */
20
21 #include "RtsUtils.h"
22 #include "sm/Storage.h"
23 #include "sm/BlockAlloc.h"
24 #include "GCThread.h"
25 #include "Sanity.h"
26 #include "Schedule.h"
27 #include "Apply.h"
28 #include "Printer.h"
29 #include "Arena.h"
30 #include "RetainerProfile.h"
31 #include "CNF.h"
32 #include "sm/NonMoving.h"
33 #include "sm/NonMovingMark.h"
34 #include "Profiling.h" // prof_arena
35
36 /* -----------------------------------------------------------------------------
37 Forward decls.
38 -------------------------------------------------------------------------- */
39
40 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, uint32_t );
41 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, uint32_t );
42 static void checkClosureShallow ( const StgClosure * );
43 static void checkSTACK (StgStack *stack);
44
45 static W_ countNonMovingSegments ( struct NonmovingSegment *segs );
46 static W_ countNonMovingHeap ( struct NonmovingHeap *heap );
47
48 /* -----------------------------------------------------------------------------
49 Check stack sanity
50 -------------------------------------------------------------------------- */
51
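/*
 * Check the pointer words of a payload described by a small bitmap:
 * a clear bit marks a pointer, which is checked shallowly; a set bit
 * marks a non-pointer word, which is skipped.
 */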
52 static void
53 checkSmallBitmap( StgPtr payload, StgWord bitmap, uint32_t size )
54 {
55 uint32_t i;
56
57 for(i = 0; i < size; i++, bitmap >>= 1 ) {
58 if ((bitmap & 1) == 0) {
59 checkClosureShallow((StgClosure *)payload[i]);
60 }
61 }
62 }
63
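/*
 * As checkSmallBitmap, but for payloads described by a multi-word
 * StgLargeBitmap; bits are consumed BITS_IN(W_) at a time from
 * successive bitmap words.
 */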
64 static void
65 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, uint32_t size )
66 {
67 StgWord bmp;
68 uint32_t i, j;
69
70 i = 0;
71 for (bmp=0; i < size; bmp++) {
72 StgWord bitmap = large_bitmap->bitmap[bmp];
73 j = 0;
74 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
75 if ((bitmap & 1) == 0) {
76 checkClosureShallow((StgClosure *)payload[i]);
77 }
78 }
79 }
80 }
81
82 /*
83 * Check that it looks like a valid closure, without checking its payload.
84 * This is used to avoid recursion between checking PAPs and checking stack
85 * chunks.
86 */
87 static void
88 checkClosureShallow( const StgClosure* p )
89 {
90 ASSERT(LOOKS_LIKE_CLOSURE_PTR(UNTAG_CONST_CLOSURE(p)));
91 }
92
93 // check an individual stack object
94 StgOffset
95 checkStackFrame( StgPtr c )
96 {
97 uint32_t size;
98 const StgRetInfoTable* info;
99
100 info = get_ret_itbl((StgClosure *)c);
101
102 /* All activation records have 'bitmap' style layout info. */
103 switch (info->i.type) {
104
105 case UPDATE_FRAME:
106 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
107 FALLTHROUGH;
108 case ATOMICALLY_FRAME:
109 case CATCH_RETRY_FRAME:
110 case CATCH_STM_FRAME:
111 case CATCH_FRAME:
112 // small bitmap cases (<= 32 entries)
113 case UNDERFLOW_FRAME:
114 case STOP_FRAME:
115 case RET_SMALL:
116 size = BITMAP_SIZE(info->i.layout.bitmap);
117 checkSmallBitmap((StgPtr)c + 1,
118 BITMAP_BITS(info->i.layout.bitmap), size);
119 return 1 + size;
120
121 case RET_BCO: {
122 StgBCO *bco;
123 uint32_t size;
124 bco = (StgBCO *)*(c+1);
125 size = BCO_BITMAP_SIZE(bco);
126 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
127 return 2 + size;
128 }
129
130 case RET_BIG: // large bitmap (> 32 entries)
131 size = GET_LARGE_BITMAP(&info->i)->size;
132 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
133 return 1 + size;
134
135 case RET_FUN:
136 {
137 const StgFunInfoTable *fun_info;
138 StgRetFun *ret_fun;
139
140 ret_fun = (StgRetFun *)c;
141 fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(ret_fun->fun));
142 size = ret_fun->size;
143 switch (fun_info->f.fun_type) {
144 case ARG_GEN:
145 checkSmallBitmap((StgPtr)ret_fun->payload,
146 BITMAP_BITS(fun_info->f.b.bitmap), size);
147 break;
148 case ARG_GEN_BIG:
149 checkLargeBitmap((StgPtr)ret_fun->payload,
150 GET_FUN_LARGE_BITMAP(fun_info), size);
151 break;
152 default:
153 checkSmallBitmap((StgPtr)ret_fun->payload,
154 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
155 size);
156 break;
157 }
158 return sizeofW(StgRetFun) + size;
159 }
160
161 default:
162 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
163 }
164 }
165
166 // check sections of stack between update frames
167 void
168 checkStackChunk( StgPtr sp, StgPtr stack_end )
169 {
170 StgPtr p;
171
172 p = sp;
173 while (p < stack_end) {
174 p += checkStackFrame( p );
175 }
176 ASSERT( p == stack_end );
177 }
178
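/*
 * Check the argument payload of a PAP or AP against the applied
 * function's argument bitmap (small, large, or BCO bitmap, depending on
 * its fun_type), and check that the pointer tag on the function closure
 * is consistent with its arity.
 */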
179 static void
180 checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
181 {
182 const StgClosure *fun;
183 const StgFunInfoTable *fun_info;
184
185 fun = UNTAG_CONST_CLOSURE(tagged_fun);
186 ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
187 fun_info = get_fun_itbl(fun);
188
189 switch (fun_info->f.fun_type) {
190 case ARG_GEN:
191 checkSmallBitmap( (StgPtr)payload,
192 BITMAP_BITS(fun_info->f.b.bitmap), n_args );
193 break;
194 case ARG_GEN_BIG:
195 checkLargeBitmap( (StgPtr)payload,
196 GET_FUN_LARGE_BITMAP(fun_info),
197 n_args );
198 break;
199 case ARG_BCO:
200 checkLargeBitmap( (StgPtr)payload,
201 BCO_BITMAP(fun),
202 n_args );
203 break;
204 default:
205 checkSmallBitmap( (StgPtr)payload,
206 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
207 n_args );
208 break;
209 }
210
211 ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
212 : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
213 }
214
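/*
 * With profiling enabled, check that a closure's cost-centre stack lives
 * in the profiler's arena (prof_arena) whenever it is heap-allocated.
 */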
215 #if defined(PROFILING)
216 static void
217 checkClosureProfSanity(const StgClosure *p)
218 {
219 StgProfHeader prof_hdr = p->header.prof;
220 CostCentreStack *ccs = prof_hdr.ccs;
221 if (HEAP_ALLOCED_GC((void*)ccs)) {
222 checkPtrInArena((StgPtr)ccs, prof_arena);
223 }
224 }
225 #endif
226
227 // Returns closure size in words
228 StgOffset
229 checkClosure( const StgClosure* p )
230 {
231 const StgInfoTable *info;
232
233 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
234
235 p = UNTAG_CONST_CLOSURE(p);
236
237 info = p->header.info;
238 load_load_barrier();
239
240 if (IS_FORWARDING_PTR(info)) {
241 barf("checkClosure: found EVACUATED closure %d", info->type);
242 }
243
244 #if defined(PROFILING)
245 checkClosureProfSanity(p);
246 #endif
247
248 info = INFO_PTR_TO_STRUCT(info);
249 load_load_barrier();
250
251 switch (info->type) {
252
253 case MVAR_CLEAN:
254 case MVAR_DIRTY:
255 {
256 StgMVar *mvar = (StgMVar *)p;
257 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
258 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
259 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
260 return sizeofW(StgMVar);
261 }
262
263 case THUNK:
264 case THUNK_1_0:
265 case THUNK_0_1:
266 case THUNK_1_1:
267 case THUNK_0_2:
268 case THUNK_2_0:
269 {
270 uint32_t i;
271 for (i = 0; i < info->layout.payload.ptrs; i++) {
272 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
273 }
274 return thunk_sizeW_fromITBL(info);
275 }
276
277 case FUN:
278 case FUN_1_0:
279 case FUN_0_1:
280 case FUN_1_1:
281 case FUN_0_2:
282 case FUN_2_0:
283 case CONSTR:
284 case CONSTR_NOCAF:
285 case CONSTR_1_0:
286 case CONSTR_0_1:
287 case CONSTR_1_1:
288 case CONSTR_0_2:
289 case CONSTR_2_0:
290 case BLACKHOLE:
291 case PRIM:
292 case MUT_PRIM:
293 case MUT_VAR_CLEAN:
294 case MUT_VAR_DIRTY:
295 case TVAR:
296 case THUNK_STATIC:
297 case FUN_STATIC:
298 case COMPACT_NFDATA:
299 {
300 uint32_t i;
301 for (i = 0; i < info->layout.payload.ptrs; i++) {
302 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
303 }
304 return sizeW_fromITBL(info);
305 }
306
307 case BLOCKING_QUEUE:
308 {
309 StgBlockingQueue *bq = (StgBlockingQueue *)p;
310
311 // NO: the BH might have been updated now
312 // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
313 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));
314
315 ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
316 ASSERT(// A bq with no other blocked TSOs:
317 bq->queue == (MessageBlackHole*)END_TSO_QUEUE ||
318 // A bq with blocked TSOs in its queue:
319 bq->queue->header.info == &stg_MSG_BLACKHOLE_info ||
320 // A bq with a deleted (in throwToMsg()) MSG_BLACKHOLE:
321 bq->queue->header.info == &stg_IND_info);
322 ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
323 get_itbl((StgClosure *)(bq->link))->type == IND ||
324 get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);
325
326 return sizeofW(StgBlockingQueue);
327 }
328
329 case BCO: {
330 StgBCO *bco = (StgBCO *)p;
331 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
332 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
333 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
334 return bco_sizeW(bco);
335 }
336
337 case IND_STATIC: /* (1, 0) closure */
338 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
339 return sizeW_fromITBL(info);
340
341 case WEAK:
342 /* deal with these specially - the info table isn't
343 * representative of the actual layout.
344 */
345 { StgWeak *w = (StgWeak *)p;
346 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
347 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
348 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
349 if (w->link) {
350 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
351 }
352 return sizeW_fromITBL(info);
353 }
354
355 case THUNK_SELECTOR:
356 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
357 return THUNK_SELECTOR_sizeW();
358
359 case IND:
360 {
361 /* we don't expect to see any of these after GC
362 * but they might appear during execution
363 */
364 StgInd *ind = (StgInd *)p;
365 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
366 return sizeofW(StgInd);
367 }
368
369 case RET_BCO:
370 case RET_SMALL:
371 case RET_BIG:
372 case UPDATE_FRAME:
373 case UNDERFLOW_FRAME:
374 case STOP_FRAME:
375 case CATCH_FRAME:
376 case ATOMICALLY_FRAME:
377 case CATCH_RETRY_FRAME:
378 case CATCH_STM_FRAME:
379 barf("checkClosure: stack frame");
380
381 case AP:
382 {
383 StgAP* ap = (StgAP *)p;
384 checkPAP (ap->fun, ap->payload, ap->n_args);
385 return ap_sizeW(ap);
386 }
387
388 case PAP:
389 {
390 StgPAP* pap = (StgPAP *)p;
391 checkPAP (pap->fun, pap->payload, pap->n_args);
392 return pap_sizeW(pap);
393 }
394
395 case AP_STACK:
396 {
397 StgAP_STACK *ap = (StgAP_STACK *)p;
398 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
399 checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
400 return ap_stack_sizeW(ap);
401 }
402
403 case ARR_WORDS:
404 return arr_words_sizeW((StgArrBytes *)p);
405
406 case MUT_ARR_PTRS_CLEAN:
407 case MUT_ARR_PTRS_DIRTY:
408 case MUT_ARR_PTRS_FROZEN_CLEAN:
409 case MUT_ARR_PTRS_FROZEN_DIRTY:
410 {
411 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
412 uint32_t i;
413 for (i = 0; i < a->ptrs; i++) {
414 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
415 }
416 return mut_arr_ptrs_sizeW(a);
417 }
418
419 case SMALL_MUT_ARR_PTRS_CLEAN:
420 case SMALL_MUT_ARR_PTRS_DIRTY:
421 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
422 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
423 {
424 StgSmallMutArrPtrs *a = (StgSmallMutArrPtrs *)p;
425 for (uint32_t i = 0; i < a->ptrs; i++) {
426 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
427 }
428 return small_mut_arr_ptrs_sizeW(a);
429 }
430
431 case TSO:
432 checkTSO((StgTSO *)p);
433 return sizeofW(StgTSO);
434
435 case STACK:
436 checkSTACK((StgStack*)p);
437 return stack_sizeW((StgStack*)p);
438
439 case TREC_CHUNK:
440 {
441 uint32_t i;
442 StgTRecChunk *tc = (StgTRecChunk *)p;
443 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
444 for (i = 0; i < tc -> next_entry_idx; i ++) {
445 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
446 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
447 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
448 }
449 return sizeofW(StgTRecChunk);
450 }
451
452 default:
453 barf("checkClosure (closure type %d)", info->type);
454 }
455 }
456
457
458 /* -----------------------------------------------------------------------------
459 Check Heap Sanity
460
461 After garbage collection, the live heap is in a state where we can
462 run through and check that all the pointers point to the right
463 place. This function starts at a given position and sanity-checks
464 all the objects in the remainder of the chain.
465 -------------------------------------------------------------------------- */
466
467 void checkHeapChain (bdescr *bd)
468 {
469 for (; bd != NULL; bd = bd->link) {
470 if(!(bd->flags & BF_SWEPT)) {
471 StgPtr p = bd->start;
472 while (p < bd->free) {
473 uint32_t size = checkClosure((StgClosure *)p);
474 /* This is the smallest size of closure that can live in the heap */
475 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
476 p += size;
477
478 /* skip over slop */
479 while (p < bd->free &&
480 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
481 }
482 }
483 }
484 }
485
486 /* -----------------------------------------------------------------------------
487 * Check nonmoving heap sanity
488 *
489 * After a concurrent sweep the nonmoving heap can be checked for validity.
490 * -------------------------------------------------------------------------- */
491
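/*
 * Check every live block of each segment in the list, i.e. every block
 * whose bitmap entry matches the current mark epoch; stale mark bits
 * below the next_free snapshot are cleared as we go.
 */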
492 static void checkNonmovingSegments (struct NonmovingSegment *seg)
493 {
494 while (seg != NULL) {
495 const nonmoving_block_idx count = nonmovingSegmentBlockCount(seg);
496 for (nonmoving_block_idx i=0; i < count; i++) {
497 if (seg->bitmap[i] == nonmovingMarkEpoch) {
498 StgPtr p = nonmovingSegmentGetBlock(seg, i);
499 checkClosure((StgClosure *) p);
500 } else if (i < nonmovingSegmentInfo(seg)->next_free_snap) {
501 seg->bitmap[i] = 0;
502 }
503 }
504 seg = seg->link;
505 }
506 }
507
508 void checkNonmovingHeap (const struct NonmovingHeap *heap)
509 {
510 for (unsigned int i=0; i < NONMOVING_ALLOCA_CNT; i++) {
511 const struct NonmovingAllocator *alloc = heap->allocators[i];
512 checkNonmovingSegments(alloc->filled);
513 checkNonmovingSegments(alloc->active);
514 for (unsigned int cap=0; cap < n_capabilities; cap++) {
515 checkNonmovingSegments(alloc->current[cap]);
516 }
517 }
518 }
519
520
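/*
 * Check a contiguous chunk of heap between 'start' and 'end': every
 * closure must look valid and be at least the minimum heap object size.
 */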
521 void
522 checkHeapChunk(StgPtr start, StgPtr end)
523 {
524 StgPtr p;
525 uint32_t size;
526
527 for (p=start; p<end; p+=size) {
528 ASSERT(LOOKS_LIKE_INFO_PTR(*p));
529 size = checkClosure((StgClosure *)p);
530 /* This is the smallest size of closure that can live in the heap. */
531 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
532 }
533 }
534
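/*
 * Check every object on a large-object list, skipping blocks that are
 * flagged BF_PINNED.
 */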
535 void
536 checkLargeObjects(bdescr *bd)
537 {
538 while (bd != NULL) {
539 if (!(bd->flags & BF_PINNED)) {
540 checkClosure((StgClosure *)bd->start);
541 }
542 bd = bd->link;
543 }
544 }
545
546 static void
547 checkCompactObjects(bdescr *bd)
548 {
549 // Compact objects are similar to large objects, but they have a
550 // StgCompactNFDataBlock at the beginning, before the actual closure
551
552 for ( ; bd != NULL; bd = bd->link) {
553 ASSERT(bd->flags & BF_COMPACT);
554
555 StgCompactNFDataBlock *block = (StgCompactNFDataBlock*)bd->start;
556 StgCompactNFData *str = block->owner;
557 ASSERT((W_)str == (W_)block + sizeof(StgCompactNFDataBlock));
558
559 StgWord totalW = 0;
560 StgCompactNFDataBlock *last;
561 for ( ; block ; block = block->next) {
562 last = block;
563 ASSERT(block->owner == str);
564
565 totalW += Bdescr((P_)block)->blocks * BLOCK_SIZE_W;
566
567 StgPtr start = Bdescr((P_)block)->start + sizeofW(StgCompactNFDataBlock);
568 StgPtr free;
569 if (Bdescr((P_)block)->start == (P_)str->nursery) {
570 free = str->hp;
571 } else {
572 free = Bdescr((P_)block)->free;
573 }
574 StgPtr p = start;
575 while (p < free) {
576 // We can't use checkClosure() here because in
577 // compactAdd#/compactAddWithSharing# when we see a non-
578 // compactable object (a function, mutable object, or pinned
579 // object) we leave the location for the object in the payload
580 // empty.
581 StgClosure *c = (StgClosure*)p;
582 checkClosureShallow(c);
583 p += closure_sizeW(c);
584 }
585 }
586
587 ASSERT(str->totalW == totalW);
588 ASSERT(str->last == last);
589 }
590 }
591
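/*
 * Check a STACK object: the stack pointer must lie within the stack
 * allocation, and every frame from sp to the end of the stack is
 * checked.
 */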
592 static void
593 checkSTACK (StgStack *stack)
594 {
595 StgPtr sp = stack->sp;
596 StgOffset stack_size = stack->stack_size;
597 StgPtr stack_end = stack->stack + stack_size;
598
599 ASSERT(stack->stack <= sp && sp <= stack_end);
600
601 checkStackChunk(sp, stack_end);
602 }
603
604 void
605 checkTSO(StgTSO *tso)
606 {
607 StgTSO *next;
608 const StgInfoTable *info;
609
610 if (tso->what_next == ThreadKilled) {
611 /* The garbage collector doesn't bother following any pointers
612 * from dead threads, so don't check sanity here.
613 */
614 return;
615 }
616
617 next = tso->_link;
618 info = (const StgInfoTable*) tso->_link->header.info;
619 load_load_barrier();
620
621 ASSERT(next == END_TSO_QUEUE ||
622 info == &stg_MVAR_TSO_QUEUE_info ||
623 info == &stg_TSO_info ||
624 info == &stg_WHITEHOLE_info); // used to happen due to STM doing
625 // lockTSO(), might not happen now
626
627 if ( tso->why_blocked == BlockedOnMVar
628 || tso->why_blocked == BlockedOnMVarRead
629 || tso->why_blocked == BlockedOnBlackHole
630 || tso->why_blocked == BlockedOnMsgThrowTo
631 || tso->why_blocked == NotBlocked
632 ) {
633 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
634 }
635
636 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
637 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
638 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));
639
640 // XXX are we checking the stack twice?
641 checkSTACK(tso->stackobj);
642 }
643
644 /*
645 Check that all TSOs have been evacuated.
646 Optionally also check the sanity of the TSOs.
647 */
648 void
649 checkGlobalTSOList (bool checkTSOs)
650 {
651 StgTSO *tso;
652 uint32_t g;
653
654 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
655 for (tso=generations[g].threads; tso != END_TSO_QUEUE;
656 tso = tso->global_link) {
657 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
658 ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
659 if (checkTSOs)
660 checkTSO(tso);
661
662 // If this TSO is dirty and in an old generation, it had better
663 // be on the mutable list.
664 if (tso->dirty) {
665 ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
666 tso->flags &= ~TSO_MARKED;
667 }
668
669 {
670 StgStack *stack;
671 StgUnderflowFrame *frame;
672
673 stack = tso->stackobj;
674 while (1) {
675 if (stack->dirty & STACK_DIRTY) {
676 ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & STACK_SANE));
677 stack->dirty &= ~STACK_SANE;
678 }
679 frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
680 - sizeofW(StgUnderflowFrame));
681 if (frame->info != &stg_stack_underflow_frame_info
682 || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
683 stack = frame->next_chunk;
684 }
685 }
686 }
687 }
688 }
689
690 /* -----------------------------------------------------------------------------
691 Check mutable list sanity.
692 -------------------------------------------------------------------------- */
693
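/*
 * Check every closure on a generation's mutable list. TSOs and STACKs
 * found there are flagged (TSO_MARKED / STACK_SANE) so that
 * checkGlobalTSOList can later verify that dirty objects in older
 * generations really are on a mutable list.
 */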
694 static void
695 checkMutableList( bdescr *mut_bd, uint32_t gen )
696 {
697 bdescr *bd;
698 StgPtr q;
699 StgClosure *p;
700
701 for (bd = mut_bd; bd != NULL; bd = bd->link) {
702 for (q = bd->start; q < bd->free; q++) {
703 p = (StgClosure *)*q;
704 ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
705 checkClosure(p);
706
707 switch (get_itbl(p)->type) {
708 case TSO:
709 ((StgTSO *)p)->flags |= TSO_MARKED;
710 break;
711 case STACK:
712 ((StgStack *)p)->dirty |= STACK_SANE;
713 break;
714 }
715 }
716 }
717 }
718
719 static void
720 checkLocalMutableLists (uint32_t cap_no)
721 {
722 uint32_t g;
723 for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
724 checkMutableList(capabilities[cap_no]->mut_lists[g], g);
725 }
726 }
727
728 static void
729 checkMutableLists (void)
730 {
731 uint32_t i;
732 for (i = 0; i < n_capabilities; i++) {
733 checkLocalMutableLists(i);
734 }
735 }
736
737 /*
738 Check the static objects list.
739 */
740 void
741 checkStaticObjects ( StgClosure* static_objects )
742 {
743 StgClosure *p = static_objects;
744 const StgInfoTable *info;
745
746 while (p != END_OF_STATIC_OBJECT_LIST) {
747 p = UNTAG_STATIC_LIST_PTR(p);
748 checkClosure(p);
749 info = get_itbl(p);
750 switch (info->type) {
751 case IND_STATIC:
752 {
753 const StgClosure *indirectee;
754
755 indirectee = UNTAG_CONST_CLOSURE(((StgIndStatic *)p)->indirectee);
756 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
757 ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
758 p = *IND_STATIC_LINK((StgClosure *)p);
759 break;
760 }
761
762 case THUNK_STATIC:
763 p = *THUNK_STATIC_LINK((StgClosure *)p);
764 break;
765
766 case FUN_STATIC:
767 p = *STATIC_LINK(info,(StgClosure *)p);
768 break;
769
770 case CONSTR:
771 case CONSTR_NOCAF:
772 case CONSTR_1_0:
773 case CONSTR_2_0:
774 case CONSTR_1_1:
775 p = *STATIC_LINK(info,(StgClosure *)p);
776 break;
777
778 default:
779 barf("checkStaticObjects: strange closure %p (%s)",
780 p, info_type(p));
781 }
782 }
783 }
784
785 /* Nursery sanity check */
786 void
787 checkNurserySanity (nursery *nursery)
788 {
789 bdescr *bd, *prev;
790 uint32_t blocks = 0;
791
792 prev = NULL;
793 for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
794 ASSERT(bd->gen == g0);
795 ASSERT(bd->u.back == prev);
796 prev = bd;
797 blocks += bd->blocks;
798 }
799
800 ASSERT(blocks == nursery->n_blocks);
801 }
802
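/*
 * Check a single generation: block counts, the heap chain, each GC
 * thread's workspace lists, and the generation's large and compact
 * objects. When the nonmoving collector manages the oldest generation,
 * its segment and large/compact object accounting is checked as well.
 */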
803 static void checkGeneration (generation *gen,
804 bool after_major_gc USED_IF_THREADS)
805 {
806 uint32_t n;
807 gen_workspace *ws;
808
809 //ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
810 ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
811
812 #if defined(THREADED_RTS)
813 // heap sanity checking doesn't work with SMP for two reasons:
814 // * we can't zero the slop (see Updates.h). However, we can sanity-check
815 // the heap after a major gc, because there is no slop.
816 //
817 // * the nonmoving collector may be mutating its large object lists, unless we
818 // were in fact called by the nonmoving collector.
819 if (!after_major_gc) return;
820 #endif
821
822 if (RtsFlags.GcFlags.useNonmoving && gen == oldest_gen) {
823 ASSERT(countNonMovingSegments(nonmovingHeap.free) == (W_) nonmovingHeap.n_free * NONMOVING_SEGMENT_BLOCKS);
824 ASSERT(countBlocks(nonmoving_large_objects) == n_nonmoving_large_blocks);
825 ASSERT(countBlocks(nonmoving_marked_large_objects) == n_nonmoving_marked_large_blocks);
826
827 // Compact regions
828 // Accounting here is tricky because the CNF allocation code
829 // modifies generation->n_compact_blocks directly. However, most
830 // objects being swept by the nonmoving GC are tracked in
831 // nonmoving_*_compact_objects. Consequently we can only maintain a very loose
832 // sanity invariant here.
833 uint32_t counted_cnf_blocks = 0;
834 counted_cnf_blocks += countCompactBlocks(nonmoving_marked_compact_objects);
835 counted_cnf_blocks += countCompactBlocks(nonmoving_compact_objects);
836 counted_cnf_blocks += countCompactBlocks(oldest_gen->compact_objects);
837
838 uint32_t total_cnf_blocks = 0;
839 total_cnf_blocks += n_nonmoving_compact_blocks + oldest_gen->n_compact_blocks;
840 total_cnf_blocks += n_nonmoving_marked_compact_blocks;
841
842 ASSERT(counted_cnf_blocks == total_cnf_blocks);
843 }
844
845 checkHeapChain(gen->blocks);
846
847 for (n = 0; n < n_capabilities; n++) {
848 ws = &gc_threads[n]->gens[gen->no];
849 checkHeapChain(ws->todo_bd);
850 checkHeapChain(ws->part_list);
851 checkHeapChain(ws->scavd_list);
852 }
853
854 checkLargeObjects(gen->large_objects);
855 checkCompactObjects(gen->compact_objects);
856 }
857
858 /* Full heap sanity check. */
859 static void checkFullHeap (bool after_major_gc)
860 {
861 uint32_t g, n;
862
863 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
864 checkGeneration(&generations[g], after_major_gc);
865 }
866 for (n = 0; n < n_capabilities; n++) {
867 checkNurserySanity(&nurseries[n]);
868 }
869 }
870
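/*
 * Top-level sanity check: the full heap, the block allocator's free list,
 * and (after GC) the mutable lists and the global TSO list. In a DEBUG
 * build this is typically driven by the sanity debug flag (+RTS -DS).
 */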
871 void checkSanity (bool after_gc, bool major_gc)
872 {
873 checkFullHeap(after_gc && major_gc);
874
875 checkFreeListSanity();
876
877 // always check the mutable lists and stacks after GC, because the
878 // full heap check above may do nothing in the threaded RTS.
879 if (after_gc) {
880 checkMutableLists();
881 checkGlobalTSOList(true);
882 }
883 }
884
885 static void
886 markCompactBlocks(bdescr *bd)
887 {
888 for (; bd != NULL; bd = bd->link) {
889 compactMarkKnown(((StgCompactNFDataBlock*)bd->start)->owner);
890 }
891 }
892
893 static void
894 markNonMovingSegments(struct NonmovingSegment *seg)
895 {
896 while (seg) {
897 markBlocks(Bdescr((P_)seg));
898 seg = seg->link;
899 }
900 }
901
902 // If memInventory() calculates that we have a memory leak, this
903 // function will try to find the block(s) that are leaking by marking
904 // all the ones that we know about, and searching through memory to find
905 // blocks that are not marked. In the debugger this can help to give
906 // us a clue about what kind of block leaked. In the future we might
907 // annotate blocks with their allocation site to give more helpful
908 // info.
909 static void
910 findMemoryLeak (void)
911 {
912 uint32_t g, i, j;
913 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
914 for (i = 0; i < n_capabilities; i++) {
915 markBlocks(capabilities[i]->mut_lists[g]);
916 markBlocks(gc_threads[i]->gens[g].part_list);
917 markBlocks(gc_threads[i]->gens[g].scavd_list);
918 markBlocks(gc_threads[i]->gens[g].todo_bd);
919 }
920 markBlocks(generations[g].blocks);
921 markBlocks(generations[g].large_objects);
922 markCompactBlocks(generations[g].compact_objects);
923 }
924
925 for (i = 0; i < n_nurseries; i++) {
926 markBlocks(nurseries[i].blocks);
927 }
928
929 for (i = 0; i < n_capabilities; i++) {
930 markBlocks(gc_threads[i]->free_blocks);
931 markBlocks(capabilities[i]->pinned_object_block);
932 markBlocks(capabilities[i]->upd_rem_set.queue.blocks);
933 }
934
935 if (RtsFlags.GcFlags.useNonmoving) {
936 markBlocks(upd_rem_set_block_list);
937 markBlocks(nonmoving_large_objects);
938 markBlocks(nonmoving_marked_large_objects);
939 markBlocks(nonmoving_compact_objects);
940 markBlocks(nonmoving_marked_compact_objects);
941 for (i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
942 struct NonmovingAllocator *alloc = nonmovingHeap.allocators[i];
943 markNonMovingSegments(alloc->filled);
944 markNonMovingSegments(alloc->active);
945 for (j = 0; j < n_capabilities; j++) {
946 markNonMovingSegments(alloc->current[j]);
947 }
948 }
949 markNonMovingSegments(nonmovingHeap.sweep_list);
950 markNonMovingSegments(nonmovingHeap.free);
951 if (current_mark_queue)
952 markBlocks(current_mark_queue->blocks);
953 }
954
955 #if defined(PROFILING)
956 // TODO:
957 // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
958 // markRetainerBlocks();
959 // }
960 #endif
961
962 // count the blocks allocated by the arena allocator
963 // TODO:
964 // markArenaBlocks();
965
966 // count the blocks containing executable memory
967 markBlocks(exec_block);
968
969 reportUnmarkedBlocks();
970 }
971
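/*
 * Check a capability's run queue: forward _link pointers, the back
 * pointers kept in block_info.prev, the queue tail, and the cached
 * n_run_queue length must all be consistent.
 */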
972 void
973 checkRunQueue(Capability *cap)
974 {
975 StgTSO *prev, *tso;
976 prev = END_TSO_QUEUE;
977 uint32_t n;
978 for (n = 0, tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
979 prev = tso, tso = tso->_link, n++) {
980 ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
981 ASSERT(tso->block_info.prev == prev);
982 }
983 ASSERT(cap->run_queue_tl == prev);
984 ASSERT(cap->n_run_queue == n);
985 }
986
987 /* -----------------------------------------------------------------------------
988 Memory leak detection
989
990 memInventory() checks for memory leaks by counting up all the
991 blocks we know about and comparing that to the number of blocks
992 allegedly floating around in the system.
993 -------------------------------------------------------------------------- */
994
995 // Useful for finding partially full blocks in gdb
996 void findSlop(bdescr *bd);
997 void findSlop(bdescr *bd)
998 {
999 W_ slop;
1000
1001 for (; bd != NULL; bd = bd->link) {
1002 slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
1003 if (slop > (1024/sizeof(W_))) {
1004 debugBelch("block at %p (bdescr %p) has %" FMT_Word "KB slop\n",
1005 bd->start, bd, slop / (1024/(W_)sizeof(W_)));
1006 }
1007 }
1008 }
1009
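/*
 * Count the blocks owned by a generation, cross-checking the cached
 * block counts along the way. For the oldest generation under the
 * nonmoving collector, the count comes from the nonmoving heap
 * structures rather than gen->blocks.
 */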
1010 static W_
1011 genBlocks (generation *gen)
1012 {
1013 W_ ret = 0;
1014 if (RtsFlags.GcFlags.useNonmoving && gen == oldest_gen) {
1015 // See Note [Live data accounting in nonmoving collector].
1016 ASSERT(countNonMovingHeap(&nonmovingHeap) == gen->n_blocks);
1017 ret += countAllocdBlocks(nonmoving_large_objects);
1018 ret += countAllocdBlocks(nonmoving_marked_large_objects);
1019 ret += countAllocdCompactBlocks(nonmoving_compact_objects);
1020 ret += countAllocdCompactBlocks(nonmoving_marked_compact_objects);
1021 ret += countNonMovingHeap(&nonmovingHeap);
1022 if (current_mark_queue)
1023 ret += countBlocks(current_mark_queue->blocks);
1024 } else {
1025 ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
1026 ASSERT(countCompactBlocks(gen->compact_objects) == gen->n_compact_blocks);
1027 ASSERT(countCompactBlocks(gen->compact_blocks_in_import) == gen->n_compact_blocks_in_import);
1028 ret += gen->n_blocks;
1029 }
1030
1031 ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
1032
1033 ret += gen->n_old_blocks +
1034 countAllocdBlocks(gen->large_objects) +
1035 countAllocdCompactBlocks(gen->compact_objects) +
1036 countAllocdCompactBlocks(gen->compact_blocks_in_import);
1037 return ret;
1038 }
1039
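/*
 * Block-counting helpers for the nonmoving heap: count the blocks behind
 * a segment list, behind a single allocator (filled, active, and each
 * capability's current segment), and behind the heap as a whole,
 * including the sweep and free lists.
 */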
1040 static W_
1041 countNonMovingSegments(struct NonmovingSegment *segs)
1042 {
1043 W_ ret = 0;
1044 while (segs) {
1045 ret += countBlocks(Bdescr((P_)segs));
1046 segs = segs->link;
1047 }
1048 return ret;
1049 }
1050
1051 static W_
1052 countNonMovingAllocator(struct NonmovingAllocator *alloc)
1053 {
1054 W_ ret = countNonMovingSegments(alloc->filled)
1055 + countNonMovingSegments(alloc->active);
1056 for (uint32_t i = 0; i < n_capabilities; ++i) {
1057 ret += countNonMovingSegments(alloc->current[i]);
1058 }
1059 return ret;
1060 }
1061
1062 static W_
1063 countNonMovingHeap(struct NonmovingHeap *heap)
1064 {
1065 W_ ret = 0;
1066 for (int alloc_idx = 0; alloc_idx < NONMOVING_ALLOCA_CNT; alloc_idx++) {
1067 ret += countNonMovingAllocator(heap->allocators[alloc_idx]);
1068 }
1069 ret += countNonMovingSegments(heap->sweep_list);
1070 ret += countNonMovingSegments(heap->free);
1071 return ret;
1072 }
1073
1074 void
1075 memInventory (bool show)
1076 {
1077 uint32_t g, i;
1078 W_ gen_blocks[RtsFlags.GcFlags.generations];
1079 W_ nursery_blocks = 0, retainer_blocks = 0,
1080 arena_blocks = 0, exec_blocks = 0, gc_free_blocks = 0,
1081 upd_rem_set_blocks = 0;
1082 W_ live_blocks = 0, free_blocks = 0;
1083 bool leak;
1084
1085 #if defined(THREADED_RTS)
1086 // Can't easily do a memory inventory: We might race with the nonmoving
1087 // collector. In principle we could try to take nonmoving_collection_mutex
1088 // and do an inventory if we have it but we don't currently implement this.
1089 if (RtsFlags.GcFlags.useNonmoving)
1090 return;
1091 #endif
1092
1093 // count the blocks we currently have
1094
1095 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
1096 gen_blocks[g] = 0;
1097 for (i = 0; i < n_capabilities; i++) {
1098 gen_blocks[g] += countBlocks(capabilities[i]->mut_lists[g]);
1099 gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
1100 gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
1101 gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
1102 }
1103 gen_blocks[g] += genBlocks(&generations[g]);
1104 }
1105
1106 for (i = 0; i < n_nurseries; i++) {
1107 ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
1108 nursery_blocks += nurseries[i].n_blocks;
1109 }
1110 for (i = 0; i < n_capabilities; i++) {
1111 W_ n = countBlocks(gc_threads[i]->free_blocks);
1112 gc_free_blocks += n;
1113 if (capabilities[i]->pinned_object_block != NULL) {
1114 nursery_blocks += capabilities[i]->pinned_object_block->blocks;
1115 }
1116 nursery_blocks += countBlocks(capabilities[i]->pinned_object_blocks);
1117 }
1118
1119 #if defined(PROFILING)
1120 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
1121 retainer_blocks = retainerStackBlocks();
1122 }
1123 #endif
1124
1125 // count the blocks allocated by the arena allocator
1126 arena_blocks = arenaBlocks();
1127
1128 // count the blocks containing executable memory
1129 exec_blocks = countAllocdBlocks(exec_block);
1130
1131 /* count the blocks on the free list */
1132 free_blocks = countFreeList();
1133
1134 // count UpdRemSet blocks
1135 for (i = 0; i < n_capabilities; ++i) {
1136 upd_rem_set_blocks += countBlocks(capabilities[i]->upd_rem_set.queue.blocks);
1137 }
1138 upd_rem_set_blocks += countBlocks(upd_rem_set_block_list);
1139
1140 live_blocks = 0;
1141 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
1142 live_blocks += gen_blocks[g];
1143 }
1144 live_blocks += nursery_blocks
1145 + retainer_blocks + arena_blocks + exec_blocks + gc_free_blocks
1146 + upd_rem_set_blocks;
1147
1148 #define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
1149
1150 leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
1151
1152 if (show || leak)
1153 {
1154 if (leak) {
1155 debugBelch("Memory leak detected:\n");
1156 } else {
1157 debugBelch("Memory inventory:\n");
1158 }
1159 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
1160 debugBelch(" gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n", g,
1161 gen_blocks[g], MB(gen_blocks[g]));
1162 }
1163 debugBelch(" nursery : %5" FMT_Word " blocks (%6.1lf MB)\n",
1164 nursery_blocks, MB(nursery_blocks));
1165 debugBelch(" retainer : %5" FMT_Word " blocks (%6.1lf MB)\n",
1166 retainer_blocks, MB(retainer_blocks));
1167 debugBelch(" arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
1168 arena_blocks, MB(arena_blocks));
1169 debugBelch(" exec : %5" FMT_Word " blocks (%6.1lf MB)\n",
1170 exec_blocks, MB(exec_blocks));
1171 debugBelch(" GC free pool : %5" FMT_Word " blocks (%6.1lf MB)\n",
1172 gc_free_blocks, MB(gc_free_blocks));
1173 debugBelch(" free : %5" FMT_Word " blocks (%6.1lf MB)\n",
1174 free_blocks, MB(free_blocks));
1175 debugBelch(" UpdRemSet : %5" FMT_Word " blocks (%6.1lf MB)\n",
1176 upd_rem_set_blocks, MB(upd_rem_set_blocks));
1177 debugBelch(" total : %5" FMT_Word " blocks (%6.1lf MB)\n",
1178 live_blocks + free_blocks, MB(live_blocks+free_blocks));
1179 if (leak) {
1180 debugBelch("\n in system : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
1181 (W_)(mblocks_allocated * BLOCKS_PER_MBLOCK), mblocks_allocated);
1182 }
1183 }
1184
1185 if (leak) {
1186 debugBelch("\n");
1187 findMemoryLeak();
1188 }
1189 ASSERT(n_alloc_blocks == live_blocks);
1190 ASSERT(!leak);
1191 }
1192
1193
1194 #endif /* DEBUG */