/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * The block allocator and free list manager.
 *
 * This is the architecture-independent part of the block allocator.
 * It requires only the following support from the operating system:
 *
 *    void *getMBlocks(nat n);
 *
 * returns the address of an n*MBLOCK_SIZE region of memory, aligned on
 * an MBLOCK_SIZE boundary.  There are no other restrictions on the
 * addresses of memory returned by getMBlocks().
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Storage.h"
#include "RtsUtils.h"
#include "BlockAlloc.h"
#include "OSMem.h"

#include <string.h>

static void initMBlock(void *mblock);

/* -----------------------------------------------------------------------------

   Implementation notes
   ~~~~~~~~~~~~~~~~~~~~

   Terminology:
     - bdescr = block descriptor
     - bgroup = block group (1 or more adjacent blocks)
     - mblock = mega block
     - mgroup = mega group (1 or more adjacent mblocks)

   Invariants on block descriptors
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   bd->start always points to the start of the block.

   bd->free is either:
      - zero for a non-group-head; bd->link points to the head
      - (-1) for the head of a free block group
      - or it points within the block

   bd->blocks is either:
      - zero for a non-group-head; bd->link points to the head
      - number of blocks in this group otherwise

   bd->link either points to a block descriptor or is NULL

   The following fields are not used by the allocator:
     bd->flags
     bd->gen_no
     bd->gen
     bd->dest

   Exceptions: we don't maintain invariants for all the blocks within a
   group on the free list, because it is expensive to modify every
   bdescr in a group when coalescing.  Just the head and last bdescrs
   will be correct for a group on the free list.


   Free lists
   ~~~~~~~~~~

   Preliminaries:
     - most allocations are for a small number of blocks
     - sometimes the OS gives us new memory backwards in the address
       space, sometimes forwards, so we should not be biased towards
       any particular layout in the address space
     - We want to avoid fragmentation
     - We want allocation and freeing to be O(1) or close.

   Coalescing trick: when a bgroup is freed (freeGroup()), we can check
   whether it can be coalesced with other free bgroups by checking the
   bdescrs for the blocks on either side of it.  This means that we can
   coalesce in O(1) time.  Every free bgroup must have its head and tail
   bdescrs initialised, the rest don't matter.

   We keep the free list in buckets, using a heap-sort strategy.
   Bucket N contains blocks with sizes 2^N to 2^(N+1)-1.  The list of
   blocks in each bucket is doubly-linked, so that if a block is
   coalesced we can easily remove it from its current free list.

   To allocate a new block of size S, grab a block from bucket
   log2ceiling(S) (i.e. log2() rounded up), in which all blocks are at
   least as big as S, and split it if necessary.  If there are no
   blocks in that bucket, look at bigger buckets until a block is found.
   Allocation is therefore O(log N) time.

   To free a block:
     - coalesce it with neighbours.
     - remove coalesced neighbour(s) from free list(s)
     - add the new (coalesced) block to the front of the appropriate
       bucket, given by log2(S) where S is the size of the block.

   Free is O(1).

   We cannot play this coalescing trick with mblocks, because there is
   no requirement that the bdescrs in the second and subsequent mblock
   of an mgroup are initialised (the mgroup might be filled with a
   large array, overwriting the bdescrs for example).

   So there is a separate free list for megablocks, sorted in *address*
   order, so that we can coalesce.  Allocation in this list is best-fit
   by traversing the whole list: we don't expect this list to be long,
   and allocation/freeing of large blocks is rare; avoiding
   fragmentation is more important than performance here.

   freeGroup() might end up moving a block from free_list to
   free_mblock_list, if after coalescing we end up with a full mblock.

   checkFreeListSanity() checks all the invariants on the free lists.

   --------------------------------------------------------------------------- */

/* ---------------------------------------------------------------------------
   WATCH OUT FOR OVERFLOW

   Be very careful with integer overflow here.  If you have an
   expression like (n_blocks * BLOCK_SIZE), and n_blocks is an int or
   a nat, then it will very likely overflow on a 64-bit platform.
   Always cast to StgWord (or W_ for short) first: ((W_)n_blocks * BLOCK_SIZE).

   --------------------------------------------------------------------------- */
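
/* For example, a hypothetical helper (a sketch for illustration only, not
   part of this file's API) showing the safe pattern: widen to W_ *before*
   multiplying, so the arithmetic happens at word width rather than at
   int width:

       STATIC_INLINE W_ group_size_bytes (nat n_blocks)
       {
           return (W_)n_blocks * BLOCK_SIZE;   // cast first, then multiply
       }
 */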

#define MAX_FREE_LIST 9

// In THREADED_RTS mode, the free list is protected by sm_mutex.

static bdescr *free_list[MAX_FREE_LIST];
static bdescr *free_mblock_list;

// free_list[i] contains blocks that are at least size 2^i, and at
// most size 2^(i+1) - 1.
//
// To find the free list in which to place a block, use log_2(size).
// To find a free block of the right size, use log_2_ceil(size).

W_ n_alloc_blocks;   // currently allocated blocks
W_ hw_alloc_blocks;  // high-water allocated blocks

/* -----------------------------------------------------------------------------
   Initialisation
   -------------------------------------------------------------------------- */

void initBlockAllocator(void)
{
    nat i;
    for (i=0; i < MAX_FREE_LIST; i++) {
        free_list[i] = NULL;
    }
    free_mblock_list = NULL;
    n_alloc_blocks = 0;
    hw_alloc_blocks = 0;
}

/* -----------------------------------------------------------------------------
   Allocation
   -------------------------------------------------------------------------- */

STATIC_INLINE void
initGroup(bdescr *head)
{
    bdescr *bd;
    nat i, n;

    n = head->blocks;
    head->free = head->start;
    head->link = NULL;
    for (i=1, bd = head+1; i < n; i++, bd++) {
        bd->free = 0;
        bd->blocks = 0;
        bd->link = head;
    }
}

// There are quicker non-loopy ways to do log_2, but we expect n to be
// usually small, and MAX_FREE_LIST is also small, so the loop version
// might well be the best choice here.
STATIC_INLINE nat
log_2_ceil(nat n)
{
    nat i, x;
    x = 1;
    for (i=0; i < MAX_FREE_LIST; i++) {
        if (x >= n) return i;
        x = x << 1;
    }
    return MAX_FREE_LIST;
}

STATIC_INLINE nat
log_2(nat n)
{
    nat i, x;
    x = n;
    for (i=0; i < MAX_FREE_LIST; i++) {
        x = x >> 1;
        if (x == 0) return i;
    }
    return MAX_FREE_LIST;
}

STATIC_INLINE void
free_list_insert (bdescr *bd)
{
    nat ln;

    ASSERT(bd->blocks < BLOCKS_PER_MBLOCK);
    ln = log_2(bd->blocks);

    dbl_link_onto(bd, &free_list[ln]);
}

STATIC_INLINE bdescr *
tail_of (bdescr *bd)
{
    return bd + bd->blocks - 1;
}

// After splitting a group, the last block of each group must have a
// tail that points to the head block, to keep our invariants for
// coalescing.
STATIC_INLINE void
setup_tail (bdescr *bd)
{
    bdescr *tail;
    tail = tail_of(bd);
    if (tail != bd) {
        tail->blocks = 0;
        tail->free = 0;
        tail->link = bd;
    }
}

// Take a free block group bd, and split off a group of size n from
// it.  Adjust the free list as necessary, and return the new group.
static bdescr *
split_free_block (bdescr *bd, nat n, nat ln)
{
    bdescr *fg; // free group

    ASSERT(bd->blocks > n);
    dbl_link_remove(bd, &free_list[ln]);
    fg = bd + bd->blocks - n; // take n blocks off the end
    fg->blocks = n;
    bd->blocks -= n;
    setup_tail(bd);
    ln = log_2(bd->blocks);
    dbl_link_onto(bd, &free_list[ln]);
    return fg;
}
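
// A worked example of split_free_block (the numbers follow from the code
// above): splitting n = 5 blocks off a 12-block free group returns the
// last 5 blocks as the new group, while the remaining 7 blocks are
// re-inserted at free_list[log_2(7)] = free_list[2], with a fresh tail
// set up by setup_tail().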

static bdescr *
alloc_mega_group (nat mblocks)
{
    bdescr *best, *bd, *prev;
    nat n;

    n = MBLOCK_GROUP_BLOCKS(mblocks);

    best = NULL;
    prev = NULL;
    for (bd = free_mblock_list; bd != NULL; prev = bd, bd = bd->link)
    {
        if (bd->blocks == n)
        {
            if (prev) {
                prev->link = bd->link;
            } else {
                free_mblock_list = bd->link;
            }
            initGroup(bd);
            return bd;
        }
        else if (bd->blocks > n)
        {
            if (!best || bd->blocks < best->blocks)
            {
                best = bd;
            }
        }
    }

    if (best)
    {
        // we take our chunk off the end here.
        StgWord best_mblocks = BLOCKS_TO_MBLOCKS(best->blocks);
        bd = FIRST_BDESCR((StgWord8*)MBLOCK_ROUND_DOWN(best) +
                          (best_mblocks-mblocks)*MBLOCK_SIZE);

        best->blocks = MBLOCK_GROUP_BLOCKS(best_mblocks - mblocks);
        initMBlock(MBLOCK_ROUND_DOWN(bd));
    }
    else
    {
        void *mblock = getMBlocks(mblocks);
        initMBlock(mblock); // only need to init the 1st one
        bd = FIRST_BDESCR(mblock);
    }
    bd->blocks = MBLOCK_GROUP_BLOCKS(mblocks);
    return bd;
}

bdescr *
allocGroup (nat n)
{
    bdescr *bd, *rem;
    nat ln;

    if (n == 0) barf("allocGroup: requested zero blocks");

    if (n >= BLOCKS_PER_MBLOCK)
    {
        nat mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(n);

        // n_alloc_blocks doesn't count the extra blocks we get in a
        // megablock group.
        n_alloc_blocks += mblocks * BLOCKS_PER_MBLOCK;
        if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

        bd = alloc_mega_group(mblocks);
        // only the bdescrs of the first mblock are required to be initialised
        initGroup(bd);
        goto finish;
    }

    n_alloc_blocks += n;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    ln = log_2_ceil(n);

    while (ln < MAX_FREE_LIST && free_list[ln] == NULL) {
        ln++;
    }

    if (ln == MAX_FREE_LIST) {
#if 0 /* useful for debugging fragmentation */
        if ((W_)mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W
             - (W_)((n_alloc_blocks - n) * BLOCK_SIZE_W) > (2*1024*1024)/sizeof(W_)) {
            debugBelch("Fragmentation, wanted %d blocks, %ld MB free\n", n, ((mblocks_allocated * BLOCKS_PER_MBLOCK) - n_alloc_blocks) / BLOCKS_PER_MBLOCK);
            RtsFlags.DebugFlags.block_alloc = 1;
            checkFreeListSanity();
        }
#endif

        bd = alloc_mega_group(1);
        bd->blocks = n;
        initGroup(bd);                   // we know the group will fit
        rem = bd + n;
        rem->blocks = BLOCKS_PER_MBLOCK-n;
        initGroup(rem);                  // init the slop
        n_alloc_blocks += rem->blocks;
        freeGroup(rem);                  // add the slop on to the free list
        goto finish;
    }

    bd = free_list[ln];

    if (bd->blocks == n)                 // exactly the right size!
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else if (bd->blocks > n)             // block too big...
    {
        bd = split_free_block(bd, n, ln);
        ASSERT(bd->blocks == n);
        initGroup(bd);
    }
    else
    {
        barf("allocGroup: free list corrupted");
    }

finish:
    IF_DEBUG(sanity, memset(bd->start, 0xaa, (W_)bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}

//
// Allocate a chunk of blocks that is at least min and at most max
// blocks in size.  This API is used by the nursery allocator, which
// prefers contiguous memory but doesn't require it.  When memory is
// fragmented we might have lots of large chunks that are less than a
// full megablock, so allowing the nursery allocator to use these
// reduces fragmentation considerably.  e.g. on a GHC build with
// +RTS -H, I saw fragmentation go from 17MB down to 3MB on a single
// compile.
//
bdescr *
allocLargeChunk (nat min, nat max)
{
    bdescr *bd;
    nat ln, lnmax;

    if (min >= BLOCKS_PER_MBLOCK) {
        return allocGroup(max);
    }

    ln = log_2_ceil(min);
    lnmax = log_2_ceil(max); // tops out at MAX_FREE_LIST

    while (ln < lnmax && free_list[ln] == NULL) {
        ln++;
    }
    if (ln == lnmax) {
        return allocGroup(max);
    }
    bd = free_list[ln];

    if (bd->blocks <= max)              // whole group fits within max: take it
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else                                // group too big: split off max blocks
    {
        bd = split_free_block(bd, max, ln);
        ASSERT(bd->blocks == max);
        initGroup(bd);
    }

    n_alloc_blocks += bd->blocks;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    IF_DEBUG(sanity, memset(bd->start, 0xaa, (W_)bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}

bdescr *
allocGroup_lock(nat n)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocGroup(n);
    RELEASE_SM_LOCK;
    return bd;
}

bdescr *
allocBlock(void)
{
    return allocGroup(1);
}

bdescr *
allocBlock_lock(void)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocBlock();
    RELEASE_SM_LOCK;
    return bd;
}
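
/* A minimal usage sketch (hypothetical client code, not part of this
   file): the _lock variants acquire and release the storage manager
   lock themselves, so they are the ones to call when not already
   holding it.

       bdescr *bd = allocGroup_lock(4);   // 4 contiguous blocks
       // ... use the memory starting at bd->start ...
       freeGroup_lock(bd);                // hand the group back
 */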

/* -----------------------------------------------------------------------------
   De-Allocation
   -------------------------------------------------------------------------- */

STATIC_INLINE bdescr *
coalesce_mblocks (bdescr *p)
{
    bdescr *q;

    q = p->link;
    if (q != NULL &&
        MBLOCK_ROUND_DOWN(q) ==
        (StgWord8*)MBLOCK_ROUND_DOWN(p) +
        BLOCKS_TO_MBLOCKS(p->blocks) * MBLOCK_SIZE) {
        // can coalesce
        p->blocks = MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(p->blocks) +
                                        BLOCKS_TO_MBLOCKS(q->blocks));
        p->link = q->link;
        return p;
    }
    return q;
}

static void
free_mega_group (bdescr *mg)
{
    bdescr *bd, *prev;

    // Find the right place in the free list.  free_mblock_list is
    // sorted by *address*, not by size as the free_list is.
    prev = NULL;
    bd = free_mblock_list;
    while (bd && bd->start < mg->start) {
        prev = bd;
        bd = bd->link;
    }

    // coalesce backwards
    if (prev)
    {
        mg->link = prev->link;
        prev->link = mg;
        mg = coalesce_mblocks(prev);
    }
    else
    {
        mg->link = free_mblock_list;
        free_mblock_list = mg;
    }
    // coalesce forwards
    coalesce_mblocks(mg);

    IF_DEBUG(sanity, checkFreeListSanity());
}

void
freeGroup(bdescr *p)
{
    nat ln;

    // Todo: not true in multithreaded GC
    // ASSERT_SM_LOCK();

    ASSERT(p->free != (P_)-1);

    p->free = (void *)-1;  /* indicates that this block is free */
    p->gen = NULL;
    p->gen_no = 0;
    /* fill the block group with garbage if sanity checking is on */
    IF_DEBUG(sanity, memset(p->start, 0xaa, (W_)p->blocks * BLOCK_SIZE));

    if (p->blocks == 0) barf("freeGroup: block size is zero");

    if (p->blocks >= BLOCKS_PER_MBLOCK)
    {
        nat mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(p->blocks);
        // If this is an mgroup, make sure it has the right number of blocks
        ASSERT(p->blocks == MBLOCK_GROUP_BLOCKS(mblocks));

        n_alloc_blocks -= mblocks * BLOCKS_PER_MBLOCK;

        free_mega_group(p);
        return;
    }

    ASSERT(n_alloc_blocks >= p->blocks);
    n_alloc_blocks -= p->blocks;

    // coalesce forwards
    {
        bdescr *next;
        next = p + p->blocks;
        if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(p)) && next->free == (P_)-1)
        {
            p->blocks += next->blocks;
            ln = log_2(next->blocks);
            dbl_link_remove(next, &free_list[ln]);
            if (p->blocks == BLOCKS_PER_MBLOCK)
            {
                free_mega_group(p);
                return;
            }
            setup_tail(p);
        }
    }

    // coalesce backwards
    if (p != FIRST_BDESCR(MBLOCK_ROUND_DOWN(p)))
    {
        bdescr *prev;
        prev = p - 1;
        if (prev->blocks == 0) prev = prev->link; // find the head

        if (prev->free == (P_)-1)
        {
            ln = log_2(prev->blocks);
            dbl_link_remove(prev, &free_list[ln]);
            prev->blocks += p->blocks;
            if (prev->blocks >= BLOCKS_PER_MBLOCK)
            {
                free_mega_group(prev);
                return;
            }
            p = prev;
        }
    }

    setup_tail(p);
    free_list_insert(p);

    IF_DEBUG(sanity, checkFreeListSanity());
}

void
freeGroup_lock(bdescr *p)
{
    ACQUIRE_SM_LOCK;
    freeGroup(p);
    RELEASE_SM_LOCK;
}

void
freeChain(bdescr *bd)
{
    bdescr *next_bd;
    while (bd != NULL) {
        next_bd = bd->link;
        freeGroup(bd);
        bd = next_bd;
    }
}

void
freeChain_lock(bdescr *bd)
{
    ACQUIRE_SM_LOCK;
    freeChain(bd);
    RELEASE_SM_LOCK;
}

static void
initMBlock(void *mblock)
{
    bdescr *bd;
    StgWord8 *block;

    /* the first few bdescrs in an mblock are unused, so we don't want to
     * put them all on the free list.
     */
    block = FIRST_BLOCK(mblock);
    bd = FIRST_BDESCR(mblock);

    /* Initialise the start field of each block descriptor
     */
    for (; block <= (StgWord8*)LAST_BLOCK(mblock); bd += 1,
             block += BLOCK_SIZE) {
        bd->start = (void*)block;
    }
}

/* -----------------------------------------------------------------------------
   Stats / metrics
   -------------------------------------------------------------------------- */

nat
countBlocks(bdescr *bd)
{
    nat n;
    for (n=0; bd != NULL; bd=bd->link) {
        n += bd->blocks;
    }
    return n;
}

// (*1) Just like countBlocks, except that we adjust the count for a
// megablock group so that it doesn't include the extra few blocks
// that would be taken up by block descriptors in the second and
// subsequent megablock.  This is so we can tally the count with the
// number of blocks allocated in the system, for memInventory().
nat
countAllocdBlocks(bdescr *bd)
{
    nat n;
    for (n=0; bd != NULL; bd=bd->link) {
        n += bd->blocks;
        // hack for megablock groups: see (*1) above
        if (bd->blocks > BLOCKS_PER_MBLOCK) {
            n -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
                * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
        }
    }
    return n;
}
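
// A worked example of the adjustment above, assuming for illustration
// 4KB blocks in a 1MB megablock (MBLOCK_SIZE/BLOCK_SIZE = 256) and
// BLOCKS_PER_MBLOCK = 252 -- the real values are platform-dependent:
// a 2-mblock group has bd->blocks = 252 + 256 = 508, so we subtract
// (256 - 252) * (508/256) = 4, giving 504 = 2 * BLOCKS_PER_MBLOCK,
// which tallies with memInventory()'s per-mblock accounting.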

void returnMemoryToOS(nat n /* megablocks */)
{
    bdescr *bd;
    nat size;

    bd = free_mblock_list;
    while ((n > 0) && (bd != NULL)) {
        size = BLOCKS_TO_MBLOCKS(bd->blocks);
        if (size > n) {
            nat newSize = size - n;
            char *freeAddr = MBLOCK_ROUND_DOWN(bd->start);
            freeAddr += newSize * MBLOCK_SIZE;
            bd->blocks = MBLOCK_GROUP_BLOCKS(newSize);
            freeMBlocks(freeAddr, n);
            n = 0;
        }
        else {
            char *freeAddr = MBLOCK_ROUND_DOWN(bd->start);
            n -= size;
            bd = bd->link;
            freeMBlocks(freeAddr, size);
        }
    }
    free_mblock_list = bd;

    osReleaseFreeMemory();

    IF_DEBUG(gc,
        if (n != 0) {
            debugBelch("Wanted to free %d more MBlocks than are freeable\n",
                       n);
        }
    );
}

/* -----------------------------------------------------------------------------
   Debugging
   -------------------------------------------------------------------------- */

#ifdef DEBUG
static void
check_tail (bdescr *bd)
{
    bdescr *tail = tail_of(bd);

    if (tail != bd)
    {
        ASSERT(tail->blocks == 0);
        ASSERT(tail->free == 0);
        ASSERT(tail->link == bd);
    }
}

void
checkFreeListSanity(void)
{
    bdescr *bd, *prev;
    nat ln, min;

    min = 1;
    for (ln = 0; ln < MAX_FREE_LIST; ln++) {
        IF_DEBUG(block_alloc, debugBelch("free block list [%d]:\n", ln));

        prev = NULL;
        for (bd = free_list[ln]; bd != NULL; prev = bd, bd = bd->link)
        {
            IF_DEBUG(block_alloc,
                     debugBelch("group at %p, length %ld blocks\n",
                                bd->start, (long)bd->blocks));
            ASSERT(bd->free == (P_)-1);
            ASSERT(bd->blocks > 0 && bd->blocks < BLOCKS_PER_MBLOCK);
            ASSERT(bd->blocks >= min && bd->blocks <= (min*2 - 1));
            ASSERT(bd->link != bd); // catch easy loops

            check_tail(bd);

            if (prev)
                ASSERT(bd->u.back == prev);
            else
                ASSERT(bd->u.back == NULL);

            {
                bdescr *next;
                next = bd + bd->blocks;
                if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(bd)))
                {
                    ASSERT(next->free != (P_)-1);
                }
            }
        }
        min = min << 1;
    }

    prev = NULL;
    for (bd = free_mblock_list; bd != NULL; prev = bd, bd = bd->link)
    {
        IF_DEBUG(block_alloc,
                 debugBelch("mega group at %p, length %ld blocks\n",
                            bd->start, (long)bd->blocks));

        ASSERT(bd->link != bd); // catch easy loops

        if (bd->link != NULL)
        {
            // make sure the list is sorted
            ASSERT(bd->start < bd->link->start);
        }

        ASSERT(bd->blocks >= BLOCKS_PER_MBLOCK);
        ASSERT(MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(bd->blocks))
               == bd->blocks);

        // make sure we're fully coalesced
        if (bd->link != NULL)
        {
            ASSERT (MBLOCK_ROUND_DOWN(bd->link) !=
                    (StgWord8*)MBLOCK_ROUND_DOWN(bd) +
                    BLOCKS_TO_MBLOCKS(bd->blocks) * MBLOCK_SIZE);
        }
    }
}

nat /* BLOCKS */
countFreeList(void)
{
    bdescr *bd;
    W_ total_blocks = 0;
    nat ln;

    for (ln=0; ln < MAX_FREE_LIST; ln++) {
        for (bd = free_list[ln]; bd != NULL; bd = bd->link) {
            total_blocks += bd->blocks;
        }
    }
    for (bd = free_mblock_list; bd != NULL; bd = bd->link) {
        total_blocks += BLOCKS_PER_MBLOCK * BLOCKS_TO_MBLOCKS(bd->blocks);
        // The caller of this function, memInventory(), expects to match
        // the total number of blocks in the system against mblocks *
        // BLOCKS_PER_MBLOCK, so we must subtract the space for the
        // block descriptors from *every* mblock.
    }
    return total_blocks;
}

void
markBlocks (bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        bd->flags |= BF_KNOWN;
    }
}

void
reportUnmarkedBlocks (void)
{
    void *mblock;
    bdescr *bd;

    debugBelch("Unreachable blocks:\n");
    for (mblock = getFirstMBlock(); mblock != NULL;
         mblock = getNextMBlock(mblock)) {
        for (bd = FIRST_BDESCR(mblock); bd <= LAST_BDESCR(mblock); ) {
            if (!(bd->flags & BF_KNOWN) && bd->free != (P_)-1) {
                debugBelch("  %p\n",bd);
            }
            if (bd->blocks >= BLOCKS_PER_MBLOCK) {
                mblock = (StgWord8*)mblock +
                    (BLOCKS_TO_MBLOCKS(bd->blocks) - 1) * MBLOCK_SIZE;
                break;
            } else {
                bd += bd->blocks;
            }
        }
    }
}

#endif