rts/sm/Sanity.c
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#if defined(DEBUG)                                                   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "GCThread.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"
#include "CNF.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void  checkSmallBitmap    ( StgPtr payload, StgWord bitmap, uint32_t );
static void  checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, uint32_t );
static void  checkClosureShallow ( const StgClosure * );
static void  checkSTACK          (StgStack *stack);

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

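/*
 * In the bitmaps walked below a 0 bit denotes a pointer slot and a 1 bit a
 * non-pointer word, so only the 0-bit payload words are shallow-checked.
 */
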
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, uint32_t size )
{
    uint32_t i;

    for(i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, uint32_t size )
{
    StgWord bmp;
    uint32_t i, j;

    i = 0;
    for (bmp=0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that it looks like a valid closure, without checking its payload.
 * Used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( const StgClosure* p )
{
    const StgClosure *q;

    q = UNTAG_CONST_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
}

// check an individual stack object
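// Returns the size of the frame in words, so that checkStackChunk() can
// advance to the next frame.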
StgOffset
checkStackFrame( StgPtr c )
{
    uint32_t size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fallthrough */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        uint32_t size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        const StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

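// Check the arguments stored in a PAP or AP against the function's argument
// bitmap, and check that the pointer tag on the function is consistent with
// its arity.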
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    const StgClosure *fun;
    const StgFunInfoTable *fun_info;

    fun = UNTAG_CONST_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}


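// Check a single closure and return its size in words, so that callers such
// as checkHeapChain() can step from one closure to the next.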
StgOffset
checkClosure( const StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CONST_CLOSURE(p);

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        uint32_t i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_NOCAF:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case TVAR:
    case THUNK_STATIC:
    case FUN_STATIC:
    case COMPACT_NFDATA:
    {
        uint32_t i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrBytes *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN_CLEAN:
    case MUT_ARR_PTRS_FROZEN_DIRTY:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        uint32_t i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case SMALL_MUT_ARR_PTRS_CLEAN:
    case SMALL_MUT_ARR_PTRS_DIRTY:
    case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
    case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
    {
        StgSmallMutArrPtrs *a = (StgSmallMutArrPtrs *)p;
        for (uint32_t i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return small_mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
    {
        uint32_t i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
        if (!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                uint32_t size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}

void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    uint32_t size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

static void
checkCompactObjects(bdescr *bd)
{
    // Compact objects are similar to large objects, but they have a
    // StgCompactNFDataBlock at the beginning, before the actual closure.

    for ( ; bd != NULL; bd = bd->link) {
        StgCompactNFDataBlock *block, *last;
        StgCompactNFData *str;
        StgWord totalW;

        ASSERT(bd->flags & BF_COMPACT);

        block = (StgCompactNFDataBlock*)bd->start;
        str = block->owner;
        ASSERT((W_)str == (W_)block + sizeof(StgCompactNFDataBlock));

        totalW = 0;
        for ( ; block ; block = block->next) {
            last = block;
            ASSERT(block->owner == str);

            totalW += Bdescr((P_)block)->blocks * BLOCK_SIZE_W;
        }

        ASSERT(str->totalW == totalW);
        ASSERT(str->last == last);
    }
}

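// Check that a STACK object's stack pointer lies within its bounds and that
// the frames between sp and the end of the stack are well-formed.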
static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}

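// Check the pointer fields of a TSO; dead (ThreadKilled) threads are skipped
// because the GC no longer follows their pointers.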
void
checkTSO(StgTSO *tso)
{
    StgTSO *next;
    const StgInfoTable *info;

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    next = tso->_link;
    info = (const StgInfoTable*) tso->_link->header.info;

    ASSERT(next == END_TSO_QUEUE ||
           info == &stg_MVAR_TSO_QUEUE_info ||
           info == &stg_TSO_info ||
           info == &stg_WHITEHOLE_info); // used to happen due to STM doing
                                         // lockTSO(), might not happen now

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnMVarRead
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (bool checkTSOs)
{
    StgTSO *tso;
    uint32_t g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso=generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }

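            // Walk the chain of stack chunks via their underflow frames,
            // checking that each dirty chunk in an old generation was seen
            // on a mutable list (TSO_MARKED is set by checkMutableList()).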
            {
                StgStack *stack;
                StgUnderflowFrame *frame;

                stack = tso->stackobj;
                while (1) {
                    if (stack->dirty & 1) {
                        ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                        stack->dirty &= ~TSO_MARKED;
                    }
                    frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                  - sizeofW(StgUnderflowFrame));
                    if (frame->info != &stg_stack_underflow_frame_info
                        || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                    stack = frame->next_chunk;
                }
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

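// Check that every object on a generation's mutable list actually lives in
// that generation, and mark any TSOs and STACKs found there so that
// checkGlobalTSOList() can verify that dirty objects really were on a
// mutable list.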
static void
checkMutableList( bdescr *mut_bd, uint32_t gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            checkClosure(p);

            switch (get_itbl(p)->type) {
            case TSO:
                ((StgTSO *)p)->flags |= TSO_MARKED;
                break;
            case STACK:
                ((StgStack *)p)->dirty |= TSO_MARKED;
                break;
            }
        }
    }
}

static void
checkLocalMutableLists (uint32_t cap_no)
{
    uint32_t g;
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(capabilities[cap_no]->mut_lists[g], g);
    }
}

static void
checkMutableLists (void)
{
    uint32_t i;
    for (i = 0; i < n_capabilities; i++) {
        checkLocalMutableLists(i);
    }
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    const StgInfoTable *info;

    while (p != END_OF_STATIC_OBJECT_LIST) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            const StgClosure *indirectee;

            indirectee = UNTAG_CONST_CLOSURE(((StgIndStatic *)p)->indirectee);
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        case CONSTR:
        case CONSTR_NOCAF:
        case CONSTR_1_0:
        case CONSTR_2_0:
        case CONSTR_1_1:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    uint32_t blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->gen == g0);
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}

static void checkGeneration (generation *gen,
                             bool after_major_gc USED_IF_THREADS)
{
    uint32_t n;
    gen_workspace *ws;

    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).  However, we can sanity-check
    // the heap after a major gc, because there is no slop.
    if (!after_major_gc) return;
#endif

    checkHeapChain(gen->blocks);

    for (n = 0; n < n_capabilities; n++) {
        ws = &gc_threads[n]->gens[gen->no];
        checkHeapChain(ws->todo_bd);
        checkHeapChain(ws->part_list);
        checkHeapChain(ws->scavd_list);
    }

    checkLargeObjects(gen->large_objects);
    checkCompactObjects(gen->compact_objects);
}

/* Full heap sanity check. */
static void checkFullHeap (bool after_major_gc)
{
    uint32_t g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkGeneration(&generations[g], after_major_gc);
    }
    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }
}

void checkSanity (bool after_gc, bool major_gc)
{
    checkFullHeap(after_gc && major_gc);

    checkFreeListSanity();

    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    if (after_gc) {
        checkMutableLists();
        checkGlobalTSOList(true);
    }
}

static void
markCompactBlocks(bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        compactMarkKnown(((StgCompactNFDataBlock*)bd->start)->owner);
    }
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and search through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    uint32_t g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i]->mut_lists[g]);
            markBlocks(gc_threads[i]->gens[g].part_list);
            markBlocks(gc_threads[i]->gens[g].scavd_list);
            markBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
        markCompactBlocks(generations[g].compact_objects);
    }

    for (i = 0; i < n_nurseries; i++) {
        markBlocks(nurseries[i].blocks);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(gc_threads[i]->free_blocks);
        markBlocks(capabilities[i]->pinned_object_block);
    }

#if defined(PROFILING)
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

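// Check that a capability's run queue is a consistent doubly-linked list:
// each TSO's block_info.prev points at its predecessor, and the cached tail
// pointer and queue length match what we find by walking the list.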
void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    uint32_t n;
    for (n = 0, tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link, n++) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
    ASSERT(cap->n_run_queue == n);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    W_ slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %" FMT_Word "KB slop\n",
                       bd->start, bd, slop / (1024/(W_)sizeof(W_)));
        }
    }
}

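// Count the blocks owned by a generation, checking the cached block counts
// along the way.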
static W_
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    ASSERT(countCompactBlocks(gen->compact_objects) == gen->n_compact_blocks);
    ASSERT(countCompactBlocks(gen->compact_blocks_in_import) == gen->n_compact_blocks_in_import);
    return gen->n_blocks + gen->n_old_blocks +
        countAllocdBlocks(gen->large_objects) +
        countAllocdCompactBlocks(gen->compact_objects) +
        countAllocdCompactBlocks(gen->compact_blocks_in_import);
}

void
memInventory (bool show)
{
    uint32_t g, i;
    W_ gen_blocks[RtsFlags.GcFlags.generations];
    W_ nursery_blocks, retainer_blocks,
        arena_blocks, exec_blocks, gc_free_blocks = 0;
    W_ live_blocks = 0, free_blocks = 0;
    bool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i]->mut_lists[g]);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_nurseries; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }
    for (i = 0; i < n_capabilities; i++) {
        gc_free_blocks += countBlocks(gc_threads[i]->free_blocks);
        if (capabilities[i]->pinned_object_block != NULL) {
            nursery_blocks += capabilities[i]->pinned_object_block->blocks;
        }
        nursery_blocks += countBlocks(capabilities[i]->pinned_object_blocks);
    }

    retainer_blocks = 0;
#if defined(PROFILING)
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks + retainer_blocks
                 + arena_blocks + exec_blocks + gc_free_blocks;

#define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  GC free pool : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   gc_free_blocks, MB(gc_free_blocks));
        debugBelch("  free         : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5" FMT_Word " blocks (%6.1lf MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
                       (W_)(mblocks_allocated * BLOCKS_PER_MBLOCK), mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */