/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * The block allocator and free list manager.
 *
 * This is the architecture independent part of the block allocator.
 * It requires only the following support from the operating system:
 *
 *    void *getMBlock(nat n);
 *
 * returns the address of an n*MBLOCK_SIZE region of memory, aligned on
 * an MBLOCK_SIZE boundary.  There are no other restrictions on the
 * addresses of memory returned by getMBlock().
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Storage.h"
#include "RtsUtils.h"
#include "BlockAlloc.h"
#include "OSMem.h"

#include <string.h>

static void initMBlock(void *mblock);

/* -----------------------------------------------------------------------------

  Implementation notes
  ~~~~~~~~~~~~~~~~~~~~

  Terminology:
    - bdescr = block descriptor
    - bgroup = block group (1 or more adjacent blocks)
    - mblock = mega block
    - mgroup = mega group (1 or more adjacent mblocks)

  Invariants on block descriptors
  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  bd->start always points to the start of the block.

  bd->free is either:
    - zero for a non-group-head; bd->link points to the head
    - (-1) for the head of a free block group
    - or it points within the block (group)

  bd->blocks is either:
    - zero for a non-group-head; bd->link points to the head
    - number of blocks in this group otherwise

  bd->link either points to a block descriptor or is NULL

  The following fields are not used by the allocator:
    bd->flags
    bd->gen_no
    bd->gen
    bd->dest

  Exceptions: we don't maintain invariants for all the blocks within a
  group on the free list, because it is expensive to modify every
  bdescr in a group when coalescing.  Just the head and last bdescrs
  will be correct for a group on the free list.


  Free lists
  ~~~~~~~~~~

  Preliminaries:
    - most allocations are for a small number of blocks
    - sometimes the OS gives us new memory backwards in the address
      space, sometimes forwards, so we should not be biased towards
      any particular layout in the address space
    - We want to avoid fragmentation
    - We want allocation and freeing to be O(1) or close.

  Coalescing trick: when a bgroup is freed (freeGroup()), we can check
  whether it can be coalesced with other free bgroups by checking the
  bdescrs for the blocks on either side of it.  This means that we can
  coalesce in O(1) time.  Every free bgroup must have its head and tail
  bdescrs initialised, the rest don't matter.

  We keep the free list in buckets, using a heap-sort strategy.
  Bucket N contains blocks with sizes 2^N to 2^(N+1)-1.  The list of
  blocks in each bucket is doubly-linked, so that if a block is
  coalesced we can easily remove it from its current free list.

  To allocate a new block of size S, grab a block from bucket
  log2ceiling(S) (i.e. log2() rounded up), in which all blocks are at
  least as big as S, and split it if necessary.  If there are no
  blocks in that bucket, look at bigger buckets until a block is found.
  Allocation is therefore O(log N) time.

  To free a block:
    - coalesce it with neighbours.
    - remove coalesced neighbour(s) from free list(s)
    - add the new (coalesced) block to the front of the appropriate
      bucket, given by log2(S) where S is the size of the block.

  Free is O(1).
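
  A worked example (illustrative numbers): a request for S = 5 blocks
  searches bucket log2ceiling(5) = 3, which holds groups of 8 to 15
  blocks.  A 9-block group found there is split: the 5 blocks we need
  are returned, and the 4-block remainder goes back onto bucket
  log2(4) = 2.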

  Megablocks
  ~~~~~~~~~~

  Separately from the free list of block groups, which are smaller than
  an mblock, we maintain a free list of mblock groups.  This is the unit
  of memory the operating system gives us, and we may either split mblocks
  into blocks or allocate them directly (for very large contiguous regions
  of memory).  mblocks have a different set of invariants than blocks:

  bd->start points to the start of the block IF the block is in the first mblock
  bd->blocks and bd->link are only valid IF this block is the first block
    of the first mblock
  No other fields are used (in particular, free is not used, meaning that
    space that is not used by the (single) object is wasted).

  This has implications for the free list as well:
  We cannot play the coalescing trick with mblocks, because there is
  no requirement that the bdescrs in the second and subsequent mblock
  of an mgroup are initialised (the mgroup might be filled with a
  large array, overwriting the bdescrs for example).

  The separate free list for megablocks is thus sorted in *address*
  order, so that we can coalesce.  Allocation in this list is best-fit
  by traversing the whole list: we don't expect this list to be long,
  and allocation/freeing of large blocks is rare; avoiding
  fragmentation is more important than performance here.

  freeGroup() might end up moving a block from free_list to
  free_mblock_list, if after coalescing we end up with a full mblock.

  checkFreeListSanity() checks all the invariants on the free lists.

  --------------------------------------------------------------------------- */

/* ---------------------------------------------------------------------------
   WATCH OUT FOR OVERFLOW

   Be very careful with integer overflow here.  If you have an
   expression like (n_blocks * BLOCK_SIZE), and n_blocks is an int or
   a nat, then it will very likely overflow on a 64-bit platform.
   Always cast to StgWord (or W_ for short) first: ((W_)n_blocks * BLOCK_SIZE).

   --------------------------------------------------------------------------- */
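
/* A minimal sketch of the hazard above (illustrative values, assuming the
 * default 4K BLOCK_SIZE and a 32-bit 'nat' on a 64-bit platform):
 *
 *   nat n_blocks = 1 << 21;                     // 2^21 blocks
 *   W_  bad      = n_blocks * BLOCK_SIZE;       // product wraps in 32-bit arithmetic
 *   W_  good     = (W_)n_blocks * BLOCK_SIZE;   // widened first: 2^33, correct
 */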

#define MAX_FREE_LIST 9

// In THREADED_RTS mode, the free list is protected by sm_mutex.

static bdescr *free_list[MAX_FREE_LIST];
static bdescr *free_mblock_list;

// free_list[i] contains blocks that are at least size 2^i, and at
// most size 2^(i+1) - 1.
//
// To find the free list in which to place a block, use log_2(size).
// To find a free block of the right size, use log_2_ceil(size).

W_ n_alloc_blocks;   // currently allocated blocks
W_ hw_alloc_blocks;  // high-water allocated blocks

/* -----------------------------------------------------------------------------
   Initialisation
   -------------------------------------------------------------------------- */

void initBlockAllocator(void)
{
    nat i;
    for (i=0; i < MAX_FREE_LIST; i++) {
        free_list[i] = NULL;
    }
    free_mblock_list = NULL;
    n_alloc_blocks = 0;
    hw_alloc_blocks = 0;
}

/* -----------------------------------------------------------------------------
   Allocation
   -------------------------------------------------------------------------- */

STATIC_INLINE void
initGroup(bdescr *head)
{
    head->free = head->start;
    head->link = NULL;

    // If this is a block group (but not a megablock group), we
    // make the last block of the group point to the head.  This is used
    // when coalescing blocks in freeGroup().  We don't do this for
    // megablock groups because blocks in the second and subsequent
    // mblocks don't have bdescrs; freeing these is handled in a
    // different way by free_mblock_group().
    if (head->blocks > 1 && head->blocks <= BLOCKS_PER_MBLOCK) {
        bdescr *last = head + head->blocks-1;
        last->blocks = 0;
        last->link = head;
    }
}

// There are quicker non-loopy ways to do log_2, but we expect n to be
// usually small, and MAX_FREE_LIST is also small, so the loop version
// might well be the best choice here.
STATIC_INLINE nat
log_2_ceil(W_ n)
{
    W_ i, x;
    x = 1;
    for (i=0; i < MAX_FREE_LIST; i++) {
        if (x >= n) return i;
        x = x << 1;
    }
    return MAX_FREE_LIST;
}

STATIC_INLINE nat
log_2(W_ n)
{
    W_ i, x;
    x = n;
    for (i=0; i < MAX_FREE_LIST; i++) {
        x = x >> 1;
        if (x == 0) return i;
    }
    return MAX_FREE_LIST;
}
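
// For example: log_2(6) == 2 and log_2_ceil(6) == 3.  A free 6-block
// group is kept in free_list[2], but a request for 6 blocks starts its
// search at free_list[3], where every group is guaranteed to be big
// enough; groups in free_list[2] might or might not fit.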

STATIC_INLINE void
free_list_insert (bdescr *bd)
{
    nat ln;

    ASSERT(bd->blocks < BLOCKS_PER_MBLOCK);
    ln = log_2(bd->blocks);

    dbl_link_onto(bd, &free_list[ln]);
}


STATIC_INLINE bdescr *
tail_of (bdescr *bd)
{
    return bd + bd->blocks - 1;
}

// After splitting a group, the last block of each group must have a
// tail that points to the head block, to keep our invariants for
// coalescing.
STATIC_INLINE void
setup_tail (bdescr *bd)
{
    bdescr *tail;
    tail = tail_of(bd);
    if (tail != bd) {
        tail->blocks = 0;
        tail->free = 0;
        tail->link = bd;
    }
}


// Take a free block group bd, and split off a group of size n from
// it.  Adjust the free list as necessary, and return the new group.
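// For example (illustrative sizes): splitting n = 5 blocks off a free
// 9-block group returns the last 5 blocks; the 4-block remainder keeps
// the original head, gets a fresh tail, and is re-inserted into
// free_list[log_2(4)] == free_list[2].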
static bdescr *
split_free_block (bdescr *bd, W_ n, nat ln)
{
    bdescr *fg; // free group

    ASSERT(bd->blocks > n);
    dbl_link_remove(bd, &free_list[ln]);
    fg = bd + bd->blocks - n; // take n blocks off the end
    fg->blocks = n;
    bd->blocks -= n;
    setup_tail(bd);
    ln = log_2(bd->blocks);
    dbl_link_onto(bd, &free_list[ln]);
    return fg;
}

/* Only initializes the start pointers on the first megablock and the
 * blocks field of the first bdescr; callers are responsible for calling
 * initGroup afterwards.
 */
static bdescr *
alloc_mega_group (StgWord mblocks)
{
    bdescr *best, *bd, *prev;
    StgWord n;

    n = MBLOCK_GROUP_BLOCKS(mblocks);

    best = NULL;
    prev = NULL;
    for (bd = free_mblock_list; bd != NULL; prev = bd, bd = bd->link)
    {
        if (bd->blocks == n)
        {
            if (prev) {
                prev->link = bd->link;
            } else {
                free_mblock_list = bd->link;
            }
            return bd;
        }
        else if (bd->blocks > n)
        {
            if (!best || bd->blocks < best->blocks)
            {
                best = bd;
            }
        }
    }

    if (best)
    {
        // we take our chunk off the end here.
        StgWord best_mblocks = BLOCKS_TO_MBLOCKS(best->blocks);
        bd = FIRST_BDESCR((StgWord8*)MBLOCK_ROUND_DOWN(best) +
                          (best_mblocks-mblocks)*MBLOCK_SIZE);

        best->blocks = MBLOCK_GROUP_BLOCKS(best_mblocks - mblocks);
        initMBlock(MBLOCK_ROUND_DOWN(bd));
    }
    else
    {
        void *mblock = getMBlocks(mblocks);
        initMBlock(mblock); // only need to init the 1st one
        bd = FIRST_BDESCR(mblock);
    }
    bd->blocks = MBLOCK_GROUP_BLOCKS(mblocks);
    return bd;
}

bdescr *
allocGroup (W_ n)
{
    bdescr *bd, *rem;
    StgWord ln;

    if (n == 0) barf("allocGroup: requested zero blocks");

    if (n >= BLOCKS_PER_MBLOCK)
    {
        StgWord mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(n);

        // n_alloc_blocks doesn't count the extra blocks we get in a
        // megablock group.
        n_alloc_blocks += mblocks * BLOCKS_PER_MBLOCK;
        if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

        bd = alloc_mega_group(mblocks);
        // only the bdescrs of the first MB are required to be initialised
        initGroup(bd);
        goto finish;
    }

    n_alloc_blocks += n;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    ln = log_2_ceil(n);

    while (ln < MAX_FREE_LIST && free_list[ln] == NULL) {
        ln++;
    }

    if (ln == MAX_FREE_LIST) {
#if 0  /* useful for debugging fragmentation */
        if ((W_)mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W
             - (W_)((n_alloc_blocks - n) * BLOCK_SIZE_W) > (2*1024*1024)/sizeof(W_)) {
            debugBelch("Fragmentation, wanted %d blocks, %ld MB free\n", n, ((mblocks_allocated * BLOCKS_PER_MBLOCK) - n_alloc_blocks) / BLOCKS_PER_MBLOCK);
            RtsFlags.DebugFlags.block_alloc = 1;
            checkFreeListSanity();
        }
#endif

        bd = alloc_mega_group(1);
        bd->blocks = n;
        initGroup(bd);                   // we know the group will fit
        rem = bd + n;
        rem->blocks = BLOCKS_PER_MBLOCK-n;
        initGroup(rem);                  // init the slop
        n_alloc_blocks += rem->blocks;
        freeGroup(rem);                  // add the slop on to the free list
        goto finish;
    }

    bd = free_list[ln];

    if (bd->blocks == n)                 // exactly the right size!
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else if (bd->blocks > n)             // block too big...
    {
        bd = split_free_block(bd, n, ln);
        ASSERT(bd->blocks == n);
        initGroup(bd);
    }
    else
    {
        barf("allocGroup: free list corrupted");
    }

finish:
    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}

//
// Allocate a chunk of blocks that is at least min and at most max
// blocks in size.  This API is used by the nursery allocator that
// wants contiguous memory preferably, but doesn't require it.  When
// memory is fragmented we might have lots of chunks that are
// less than a full megablock, so allowing the nursery allocator to
// use these reduces fragmentation considerably.  e.g. on a GHC build
// with +RTS -H, I saw fragmentation go from 17MB down to 3MB on a
// single compile.
//
// Further to this: in #7257 there is a program that creates serious
// fragmentation such that the heap is full of tiny <4 block chains.
// The nursery allocator therefore has to use single blocks to avoid
// fragmentation, but we make sure that we allocate large blocks
// preferably if there are any.
//
bdescr *
allocLargeChunk (W_ min, W_ max)
{
    bdescr *bd;
    StgWord ln, lnmax;

    if (min >= BLOCKS_PER_MBLOCK) {
        return allocGroup(max);
    }

    ln = log_2_ceil(min);
    lnmax = log_2_ceil(max); // tops out at MAX_FREE_LIST

    while (ln < lnmax && free_list[ln] == NULL) {
        ln++;
    }
    if (ln == lnmax) {
        return allocGroup(max);
    }
    bd = free_list[ln];

    if (bd->blocks <= max)            // the whole group is within our limit
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else   // block too big...
    {
        bd = split_free_block(bd, max, ln);
        ASSERT(bd->blocks == max);
        initGroup(bd);
    }

    n_alloc_blocks += bd->blocks;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}

bdescr *
allocGroup_lock(W_ n)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocGroup(n);
    RELEASE_SM_LOCK;
    return bd;
}

bdescr *
allocBlock(void)
{
    return allocGroup(1);
}

bdescr *
allocBlock_lock(void)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocBlock();
    RELEASE_SM_LOCK;
    return bd;
}
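
#if 0  /* illustrative usage sketch (hypothetical caller, not compiled) */
static void example_alloc_free (void)
{
    // Take four contiguous blocks under the SM lock; bd->start points
    // at the usable memory and bd->free at the allocation point.
    bdescr *bd = allocGroup_lock(4);
    memset(bd->start, 0, (W_)4 * BLOCK_SIZE);
    // Returning the group coalesces it with any free neighbours and
    // re-inserts the result into the appropriate free-list bucket.
    freeGroup_lock(bd);
}
#endif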

/* -----------------------------------------------------------------------------
   De-Allocation
   -------------------------------------------------------------------------- */

STATIC_INLINE bdescr *
coalesce_mblocks (bdescr *p)
{
    bdescr *q;

    q = p->link;
    if (q != NULL &&
        MBLOCK_ROUND_DOWN(q) ==
        (StgWord8*)MBLOCK_ROUND_DOWN(p) +
        BLOCKS_TO_MBLOCKS(p->blocks) * MBLOCK_SIZE) {
        // can coalesce
        p->blocks = MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(p->blocks) +
                                        BLOCKS_TO_MBLOCKS(q->blocks));
        p->link = q->link;
        return p;
    }
    return q;
}

static void
free_mega_group (bdescr *mg)
{
    bdescr *bd, *prev;

    // Find the right place in the free list.  free_mblock_list is
    // sorted by *address*, not by size as the free_list is.
    prev = NULL;
    bd = free_mblock_list;
    while (bd && bd->start < mg->start) {
        prev = bd;
        bd = bd->link;
    }

    // coalesce backwards
    if (prev)
    {
        mg->link = prev->link;
        prev->link = mg;
        mg = coalesce_mblocks(prev);
    }
    else
    {
        mg->link = free_mblock_list;
        free_mblock_list = mg;
    }
    // coalesce forwards
    coalesce_mblocks(mg);

    IF_DEBUG(sanity, checkFreeListSanity());
}


void
freeGroup(bdescr *p)
{
    StgWord ln;

    // Todo: not true in multithreaded GC
    // ASSERT_SM_LOCK();

    ASSERT(p->free != (P_)-1);

    p->free = (void *)-1;  /* indicates that this block is free */
    p->gen = NULL;
    p->gen_no = 0;
    /* fill the block group with garbage if sanity checking is on */
    IF_DEBUG(sanity,memset(p->start, 0xaa, (W_)p->blocks * BLOCK_SIZE));

    if (p->blocks == 0) barf("freeGroup: block size is zero");

    if (p->blocks >= BLOCKS_PER_MBLOCK)
    {
        StgWord mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(p->blocks);
        // If this is an mgroup, make sure it has the right number of blocks
        ASSERT(p->blocks == MBLOCK_GROUP_BLOCKS(mblocks));

        n_alloc_blocks -= mblocks * BLOCKS_PER_MBLOCK;

        free_mega_group(p);
        return;
    }

    ASSERT(n_alloc_blocks >= p->blocks);
    n_alloc_blocks -= p->blocks;

    // coalesce forwards
    {
        bdescr *next;
        next = p + p->blocks;
        if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(p)) && next->free == (P_)-1)
        {
            p->blocks += next->blocks;
            ln = log_2(next->blocks);
            dbl_link_remove(next, &free_list[ln]);
            if (p->blocks == BLOCKS_PER_MBLOCK)
            {
                free_mega_group(p);
                return;
            }
            setup_tail(p);
        }
    }

    // coalesce backwards
    if (p != FIRST_BDESCR(MBLOCK_ROUND_DOWN(p)))
    {
        bdescr *prev;
        prev = p - 1;
        if (prev->blocks == 0) prev = prev->link; // find the head

        if (prev->free == (P_)-1)
        {
            ln = log_2(prev->blocks);
            dbl_link_remove(prev, &free_list[ln]);
            prev->blocks += p->blocks;
            if (prev->blocks >= BLOCKS_PER_MBLOCK)
            {
                free_mega_group(prev);
                return;
            }
            p = prev;
        }
    }

    setup_tail(p);
    free_list_insert(p);

    IF_DEBUG(sanity, checkFreeListSanity());
}

void
freeGroup_lock(bdescr *p)
{
    ACQUIRE_SM_LOCK;
    freeGroup(p);
    RELEASE_SM_LOCK;
}

void
freeChain(bdescr *bd)
{
    bdescr *next_bd;
    while (bd != NULL) {
        next_bd = bd->link;
        freeGroup(bd);
        bd = next_bd;
    }
}

void
freeChain_lock(bdescr *bd)
{
    ACQUIRE_SM_LOCK;
    freeChain(bd);
    RELEASE_SM_LOCK;
}

static void
initMBlock(void *mblock)
{
    bdescr *bd;
    StgWord8 *block;

    /* the first few bdescrs in an mblock are unused (they correspond to
     * the space occupied by the descriptors themselves), so we don't
     * want to put them all on the free list.
     */
    block = FIRST_BLOCK(mblock);
    bd    = FIRST_BDESCR(mblock);

    /* Initialise the start field of each block descriptor
     */
    for (; block <= (StgWord8*)LAST_BLOCK(mblock); bd += 1,
             block += BLOCK_SIZE) {
        bd->start = (void*)block;
    }
}

/* -----------------------------------------------------------------------------
   Stats / metrics
   -------------------------------------------------------------------------- */

W_
countBlocks(bdescr *bd)
{
    W_ n;
    for (n=0; bd != NULL; bd=bd->link) {
        n += bd->blocks;
    }
    return n;
}

// (*1) Just like countBlocks, except that we adjust the count for a
// megablock group so that it doesn't include the extra few blocks
// that would be taken up by block descriptors in the second and
// subsequent megablock.  This is so we can tally the count with the
// number of blocks allocated in the system, for memInventory().
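//
// Worked example (using the MBLOCK_GROUP_BLOCKS definition in Block.h):
// a 2-mblock group has bd->blocks == BLOCKS_PER_MBLOCK +
// MBLOCK_SIZE/BLOCK_SIZE, since the second mblock carries no block
// descriptors.  bd->blocks divided by MBLOCK_SIZE/BLOCK_SIZE is 1, so
// we subtract (MBLOCK_SIZE/BLOCK_SIZE - BLOCKS_PER_MBLOCK) once,
// leaving exactly 2 * BLOCKS_PER_MBLOCK blocks counted for the group.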
W_
countAllocdBlocks(bdescr *bd)
{
    W_ n;
    for (n=0; bd != NULL; bd=bd->link) {
        n += bd->blocks;
        // hack for megablock groups: see (*1) above
        if (bd->blocks > BLOCKS_PER_MBLOCK) {
            n -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
                * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
        }
    }
    return n;
}

void returnMemoryToOS(nat n /* megablocks */)
{
    bdescr *bd;
    StgWord size;

    bd = free_mblock_list;
    while ((n > 0) && (bd != NULL)) {
        size = BLOCKS_TO_MBLOCKS(bd->blocks);
        if (size > n) {
            StgWord newSize = size - n;
            char *freeAddr = MBLOCK_ROUND_DOWN(bd->start);
            freeAddr += newSize * MBLOCK_SIZE;
            bd->blocks = MBLOCK_GROUP_BLOCKS(newSize);
            freeMBlocks(freeAddr, n);
            n = 0;
        }
        else {
            char *freeAddr = MBLOCK_ROUND_DOWN(bd->start);
            n -= size;
            bd = bd->link;
            freeMBlocks(freeAddr, size);
        }
    }
    free_mblock_list = bd;

    // Ask the OS to release any address space portion
    // that was associated with the just released MBlocks
    //
    // Historically, we used to ask the OS directly (via
    // osReleaseFreeMemory()) - now the MBlock layer might
    // have a reason to preserve the address space range,
    // so we keep it.
    releaseFreeMemory();

    IF_DEBUG(gc,
        if (n != 0) {
            debugBelch("Wanted to free %d more MBlocks than are freeable\n",
                       n);
        }
    );
}

/* -----------------------------------------------------------------------------
   Debugging
   -------------------------------------------------------------------------- */

#ifdef DEBUG
static void
check_tail (bdescr *bd)
{
    bdescr *tail = tail_of(bd);

    if (tail != bd)
    {
        ASSERT(tail->blocks == 0);
        ASSERT(tail->free == 0);
        ASSERT(tail->link == bd);
    }
}

void
checkFreeListSanity(void)
{
    bdescr *bd, *prev;
    StgWord ln, min;

    min = 1;
    for (ln = 0; ln < MAX_FREE_LIST; ln++) {
        IF_DEBUG(block_alloc,
                 debugBelch("free block list [%" FMT_Word "]:\n", ln));

        prev = NULL;
        for (bd = free_list[ln]; bd != NULL; prev = bd, bd = bd->link)
        {
            IF_DEBUG(block_alloc,
                     debugBelch("group at %p, length %ld blocks\n",
                                bd->start, (long)bd->blocks));
            ASSERT(bd->free == (P_)-1);
            ASSERT(bd->blocks > 0 && bd->blocks < BLOCKS_PER_MBLOCK);
            ASSERT(bd->blocks >= min && bd->blocks <= (min*2 - 1));
            ASSERT(bd->link != bd); // catch easy loops

            check_tail(bd);

            if (prev)
                ASSERT(bd->u.back == prev);
            else
                ASSERT(bd->u.back == NULL);

            {
                bdescr *next;
                next = bd + bd->blocks;
                if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(bd)))
                {
                    ASSERT(next->free != (P_)-1);
                }
            }
        }
        min = min << 1;
    }

    prev = NULL;
    for (bd = free_mblock_list; bd != NULL; prev = bd, bd = bd->link)
    {
        IF_DEBUG(block_alloc,
                 debugBelch("mega group at %p, length %ld blocks\n",
                            bd->start, (long)bd->blocks));

        ASSERT(bd->link != bd); // catch easy loops

        if (bd->link != NULL)
        {
            // make sure the list is sorted
            ASSERT(bd->start < bd->link->start);
        }

        ASSERT(bd->blocks >= BLOCKS_PER_MBLOCK);
        ASSERT(MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(bd->blocks))
               == bd->blocks);

        // make sure we're fully coalesced
        if (bd->link != NULL)
        {
            ASSERT (MBLOCK_ROUND_DOWN(bd->link) !=
                    (StgWord8*)MBLOCK_ROUND_DOWN(bd) +
                    BLOCKS_TO_MBLOCKS(bd->blocks) * MBLOCK_SIZE);
        }
    }
}

W_ /* BLOCKS */
countFreeList(void)
{
    bdescr *bd;
    W_ total_blocks = 0;
    StgWord ln;

    for (ln=0; ln < MAX_FREE_LIST; ln++) {
        for (bd = free_list[ln]; bd != NULL; bd = bd->link) {
            total_blocks += bd->blocks;
        }
    }
    for (bd = free_mblock_list; bd != NULL; bd = bd->link) {
        total_blocks += BLOCKS_PER_MBLOCK * BLOCKS_TO_MBLOCKS(bd->blocks);
        // The caller of this function, memInventory(), expects to match
        // the total number of blocks in the system against mblocks *
        // BLOCKS_PER_MBLOCK, so we must subtract the space for the
        // block descriptors from *every* mblock.
    }
    return total_blocks;
}

void
markBlocks (bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        bd->flags |= BF_KNOWN;
    }
}

void
reportUnmarkedBlocks (void)
{
    void *mblock;
    void *state;
    bdescr *bd;

    debugBelch("Unreachable blocks:\n");
    for (mblock = getFirstMBlock(&state); mblock != NULL;
         mblock = getNextMBlock(&state, mblock)) {
        for (bd = FIRST_BDESCR(mblock); bd <= LAST_BDESCR(mblock); ) {
            if (!(bd->flags & BF_KNOWN) && bd->free != (P_)-1) {
                debugBelch("  %p\n",bd);
            }
            if (bd->blocks >= BLOCKS_PER_MBLOCK) {
                mblock = (StgWord8*)mblock +
                         (BLOCKS_TO_MBLOCKS(bd->blocks) - 1) * MBLOCK_SIZE;
                break;
            } else {
                bd += bd->blocks;
            }
        }
    }
}

#endif