/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG    /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void  checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void  checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void  checkClosureShallow ( StgClosure * );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    StgPtr p;
    nat i;

    p = payload;
    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that a closure looks like a valid closure, without checking
 * its payload.  Used to avoid recursion between checking PAPs and
 * checking stack chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p, RET_DYN_LIVENESS(r->liveness), RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
               RET_DYN_NONPTR_REGS_SIZE +
               RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through: update frames are also small-bitmap frames */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",
             c, info->i.type);
    }
}

192
193 // check sections of stack between update frames
194 void
195 checkStackChunk( StgPtr sp, StgPtr stack_end )
196 {
197 StgPtr p;
198
199 p = sp;
200 while (p < stack_end) {
201 p += checkStackFrame( p );
202 }
203 // ASSERT( p == stack_end ); -- HWL
204 }
205
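/*
 * Check the arguments stored in a PAP or AP against the argument
 * bitmap of the function being applied, and check that the pointer
 * tag on the function closure is consistent with its arity.
 */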
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgClosure *p;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    p = (StgClosure *)payload;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}


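/*
 * Check a single closure: verify that its pointer fields look like
 * valid closure pointers, and return its size in words so that the
 * caller can advance to the next object.
 */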
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl(bq->owner)->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || get_itbl(bq->queue)->type == TSO);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl(bq->link)->type == IND ||
               get_itbl(bq->link)->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}


/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            p += size;

            /* skip over slop */
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
        }
    }
}

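/*
 * Check a contiguous range of heap words [start, end), assuming it is
 * densely packed with closures (no slop between objects).
 */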
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

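/*
 * Check the closure at the head of each block on a large-object list.
 * Blocks flagged BF_PINNED are skipped: they are filled with multiple
 * pinned objects rather than holding a single large closure.
 */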
void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

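/*
 * Check a TSO: its queue links, blocking info, and the live portion
 * of its stack (between 'sp' and the end of the stack).
 */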
void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->_link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(tso->_link == END_TSO_QUEUE || get_itbl(tso->_link)->type == TSO);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));

    ASSERT(stack <= sp && sp < stack_end);

    checkStackChunk(sp, stack_end);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso = generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl(tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            tso = deRefTSO(tso);

            // If this TSO is dirty and in an old generation, it had
            // better be on a mutable list (checkMutableList() sets
            // TSO_MARKED on every TSO it finds; we check and clear
            // the mark here).
            if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

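/*
 * Check a single mutable list: every heap-allocated entry must live in
 * generation 'gen'.  Any TSOs found on the list are marked with
 * TSO_MARKED so that checkGlobalTSOList() can verify that dirty TSOs
 * in old generations really are on a mutable list.
 */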
void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            if (get_itbl(p)->type == TSO) {
                ((StgTSO *)p)->flags |= TSO_MARKED;
            }
        }
    }
}

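/*
 * Check every mutable list: the global per-generation lists and each
 * capability's private lists, then check the global TSO list.
 */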
void
checkMutableLists (rtsBool checkTSOs)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(generations[g].mut_list, g);
        for (i = 0; i < n_capabilities; i++) {
            checkMutableList(capabilities[i].mut_lists[g], g);
        }
    }
    checkGlobalTSOList(checkTSOs);
}

/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}


/* Full heap sanity check. */
void
checkSanity( rtsBool check_heap )
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        ASSERT(countBlocks(generations[g].blocks)
               == generations[g].n_blocks);
        ASSERT(countBlocks(generations[g].large_objects)
               == generations[g].n_large_blocks);
        if (check_heap) {
            checkHeap(generations[g].blocks);
        }
        checkLargeObjects(generations[g].large_objects);
    }

    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }

    checkFreeListSanity();

#if defined(THREADED_RTS)
    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    checkMutableLists(rtsTrue);
#else
    if (check_heap) {
        checkMutableLists(rtsFalse);
    } else {
        checkMutableLists(rtsTrue);
    }
#endif
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and searching through memory to
// find blocks that are not marked.  In the debugger this can help to
// give us a clue about what kind of block leaked.  In the future we
// might annotate blocks with their allocation site to give more
// helpful info.
static void
findMemoryLeak (void)
{
    nat g, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
        }
        markBlocks(generations[g].mut_list);
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

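/*
 * Check that a capability's run queue is a well-formed doubly-linked
 * list: each TSO's block_info.prev field points to its predecessor,
 * and the queue's tail pointer matches the last element.
 */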
void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

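/*
 * The invariant checked by memInventory() is:
 *
 *   blocks in generations (incl. mut lists and large objects)
 *     + nursery blocks + retainer blocks + arena blocks + exec blocks
 *     + blocks on the free list
 *   == mblocks_allocated * BLOCKS_PER_MBLOCK
 */
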
// Useful for finding partially full blocks in gdb
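//
// For example (hypothetical gdb session, with a debug RTS stopped
// under the debugger):
//
//   (gdb) call findSlop(generations[0].blocks)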
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    lnat slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}

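/*
 * Count the blocks belonging to a generation: ordinary blocks, old
 * (from-space) blocks, and large objects, sanity-checking the cached
 * block counts along the way.
 */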
static lnat
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}

void
memInventory (rtsBool show)
{
    nat g, i;
    lnat gen_blocks[RtsFlags.GcFlags.generations];
    lnat nursery_blocks, retainer_blocks,
         arena_blocks, exec_blocks;
    lnat live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
        }
        gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    live_blocks = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
                   retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5lu blocks (%lu MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5lu blocks (%lu MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5lu blocks (%lu MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
                       mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}


#endif /* DEBUG */