Revert "rts: add Emacs 'Local Variables' to every .c file"
[ghc.git] / rts / sm / BlockAlloc.c
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * The block allocator and free list manager.
 *
 * This is the architecture-independent part of the block allocator.
 * It requires only the following support from the operating system:
 *
 *    void *getMBlock(nat n);
 *
 * returns the address of an n*MBLOCK_SIZE region of memory, aligned on
 * an MBLOCK_SIZE boundary.  There are no other restrictions on the
 * addresses of memory returned by getMBlock().
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Storage.h"
#include "RtsUtils.h"
#include "BlockAlloc.h"
#include "OSMem.h"

#include <string.h>

static void initMBlock(void *mblock);

/* -----------------------------------------------------------------------------

  Implementation notes
  ~~~~~~~~~~~~~~~~~~~~

  Terminology:
    - bdescr = block descriptor
    - bgroup = block group (1 or more adjacent blocks)
    - mblock = mega block
    - mgroup = mega group (1 or more adjacent mblocks)

  Invariants on block descriptors
  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  bd->start always points to the start of the block.

  bd->free is either:
    - zero for a non-group-head; bd->link points to the head
    - (-1) for the head of a free block group
    - or it points within the block (group)

  bd->blocks is either:
    - zero for a non-group-head; bd->link points to the head
    - the number of blocks in this group otherwise

  bd->link either points to a block descriptor or is NULL.

  The following fields are not used by the allocator:
    bd->flags
    bd->gen_no
    bd->gen
    bd->dest

  Exceptions: we don't maintain invariants for all the blocks within a
  group on the free list, because it is expensive to modify every
  bdescr in a group when coalescing.  Just the head and last bdescrs
  will be correct for a group on the free list.


  Free lists
  ~~~~~~~~~~

  Preliminaries:
    - most allocations are for a small number of blocks
    - sometimes the OS gives us new memory backwards in the address
      space, sometimes forwards, so we should not be biased towards
      any particular layout in the address space
    - We want to avoid fragmentation
    - We want allocation and freeing to be O(1) or close.

  Coalescing trick: when a bgroup is freed (freeGroup()), we can check
  whether it can be coalesced with other free bgroups by checking the
  bdescrs for the blocks on either side of it.  This means that we can
  coalesce in O(1) time.  Every free bgroup must have its head and tail
  bdescrs initialised; the rest don't matter.

  We keep the free list in buckets, using a heap-sort strategy.
  Bucket N contains blocks with sizes 2^N to 2^(N+1)-1.  The list of
  blocks in each bucket is doubly-linked, so that if a block is
  coalesced we can easily remove it from its current free list.

  To allocate a new block of size S, grab a block from bucket
  log2ceiling(S) (i.e. log2() rounded up), in which all blocks are at
  least as big as S, and split it if necessary.  If there are no
  blocks in that bucket, look at bigger buckets until a block is found.
  Allocation is therefore O(logN) time.

  To free a block:
    - coalesce it with neighbours.
    - remove coalesced neighbour(s) from free list(s)
    - add the new (coalesced) block to the front of the appropriate
      bucket, given by log2(S) where S is the size of the block.

  Free is O(1).

  Megablocks
  ~~~~~~~~~~

  Separately from the free list of block groups, which are smaller than
  an mblock, we maintain a free list of mblock groups.  This is the unit
  of memory the operating system gives us, and we may either split mblocks
  into blocks or allocate them directly (for very large contiguous regions
  of memory).  mblocks have a different set of invariants than blocks:

  bd->start points to the start of the block IF the block is in the first mblock.
  bd->blocks and bd->link are only valid IF this block is the first block
    of the first mblock.
  No other fields are used (in particular, free is not used, meaning that
    space that is not used by the (single) object is wasted).

  This has implications for the free list as well:
  We cannot play the coalescing trick with mblocks, because there is
  no requirement that the bdescrs in the second and subsequent mblock
  of an mgroup are initialised (the mgroup might be filled with a
  large array, overwriting the bdescrs, for example).

  The separate free list for megablocks is thus sorted in *address*
  order, so that we can coalesce.  Allocation in this list is best-fit
  by traversing the whole list: we don't expect this list to be long,
  and allocation/freeing of large blocks is rare; avoiding
  fragmentation is more important than performance here.

  freeGroup() might end up moving a block from free_list to
  free_mblock_list, if after coalescing we end up with a full mblock.

  checkFreeListSanity() checks all the invariants on the free lists.

  --------------------------------------------------------------------------- */
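
/* As an illustration of the invariants above (a sketch, not part of the
 * allocator): given any bdescr in a group, the head can be recovered in
 * O(1), which is what makes the coalescing trick work.  freeGroup()
 * below uses exactly this step when coalescing backwards.
 */
#if 0 /* illustrative only */
static bdescr *
group_head (bdescr *bd)
{
    // a non-head bdescr has blocks == 0 and link pointing at the head;
    // a head bdescr has a non-zero block count
    return (bd->blocks == 0) ? bd->link : bd;
}
#endif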

/* ---------------------------------------------------------------------------
   WATCH OUT FOR OVERFLOW

   Be very careful with integer overflow here.  If you have an
   expression like (n_blocks * BLOCK_SIZE), and n_blocks is an int or
   a nat, then it will very likely overflow on a 64-bit platform.
   Always cast to StgWord (or W_ for short) first: ((W_)n_blocks * BLOCK_SIZE).

   --------------------------------------------------------------------------- */
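
/* For example (illustrative only, assuming the usual 4096-byte
 * BLOCK_SIZE and a 32-bit nat):
 */
#if 0
static void
example_overflow (void)
{
    nat n_blocks  = 2 * 1024 * 1024;           // 2M blocks = 8GB of memory
    W_  bad_size  = n_blocks * BLOCK_SIZE;     // multiplied at 32 bits: wraps
                                               // before being widened to W_
    W_  good_size = (W_)n_blocks * BLOCK_SIZE; // widened first: correct
}
#endif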

#define MAX_FREE_LIST 9

// In THREADED_RTS mode, the free list is protected by sm_mutex.

static bdescr *free_list[MAX_FREE_LIST];
static bdescr *free_mblock_list;

// free_list[i] contains blocks that are at least size 2^i, and at
// most size 2^(i+1) - 1.
//
// To find the free list in which to place a block, use log_2(size).
// To find a free block of the right size, use log_2_ceil(size).

W_ n_alloc_blocks;   // currently allocated blocks
W_ hw_alloc_blocks;  // high-water allocated blocks

/* -----------------------------------------------------------------------------
   Initialisation
   -------------------------------------------------------------------------- */

void initBlockAllocator(void)
{
    nat i;
    for (i=0; i < MAX_FREE_LIST; i++) {
        free_list[i] = NULL;
    }
    free_mblock_list = NULL;
    n_alloc_blocks = 0;
    hw_alloc_blocks = 0;
}

/* -----------------------------------------------------------------------------
   Allocation
   -------------------------------------------------------------------------- */

STATIC_INLINE void
initGroup(bdescr *head)
{
    bdescr *bd;
    W_ i, n;

    // If this block group fits in a single megablock, initialize
    // all of the block descriptors.  Otherwise, initialize *only*
    // the first block descriptor, since for large allocations we don't
    // need to maintain the invariant that Bdescr(p) is valid for any p
    // in the block group.  (This is because it is impossible to do, as
    // the block descriptor table for the second mblock will get
    // overwritten by contiguous user data.)
    n = head->blocks > BLOCKS_PER_MBLOCK ? 1 : head->blocks;
    head->free = head->start;
    head->link = NULL;
    for (i=1, bd = head+1; i < n; i++, bd++) {
        bd->free = 0;
        bd->blocks = 0;
        bd->link = head;
    }
}

// There are quicker non-loopy ways to do log_2, but we expect n to be
// usually small, and MAX_FREE_LIST is also small, so the loop version
// might well be the best choice here.
STATIC_INLINE nat
log_2_ceil(W_ n)
{
    W_ i, x;
    x = 1;
    for (i=0; i < MAX_FREE_LIST; i++) {
        if (x >= n) return i;
        x = x << 1;
    }
    return MAX_FREE_LIST;
}

STATIC_INLINE nat
log_2(W_ n)
{
    W_ i, x;
    x = n;
    for (i=0; i < MAX_FREE_LIST; i++) {
        x = x >> 1;
        if (x == 0) return i;
    }
    return MAX_FREE_LIST;
}
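
/* A quick worked example of the bucket arithmetic (a sketch, not part
 * of the allocator): a free group of S blocks is *stored* in bucket
 * log_2(S), but a request for S blocks is *served* from bucket
 * log_2_ceil(S), where every group is guaranteed to be at least S
 * blocks long.
 */
#if 0 /* illustrative only */
static void
example_buckets (void)
{
    ASSERT(log_2(6) == 2);       // stored with groups of size 4..7
    ASSERT(log_2_ceil(6) == 3);  // served from groups of size 8..15
    ASSERT(log_2(8) == 3 && log_2_ceil(8) == 3); // exact powers agree
}
#endif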

STATIC_INLINE void
free_list_insert (bdescr *bd)
{
    nat ln;

    ASSERT(bd->blocks < BLOCKS_PER_MBLOCK);
    ln = log_2(bd->blocks);

    dbl_link_onto(bd, &free_list[ln]);
}


STATIC_INLINE bdescr *
tail_of (bdescr *bd)
{
    return bd + bd->blocks - 1;
}

// After splitting a group, the last block of each group must have a
// tail that points to the head block, to keep our invariants for
// coalescing.
STATIC_INLINE void
setup_tail (bdescr *bd)
{
    bdescr *tail;
    tail = tail_of(bd);
    if (tail != bd) {
        tail->blocks = 0;
        tail->free = 0;
        tail->link = bd;
    }
}


// Take a free block group bd, and split off a group of size n from
// it.  Adjust the free list as necessary, and return the new group.
static bdescr *
split_free_block (bdescr *bd, W_ n, nat ln)
{
    bdescr *fg; // free group

    ASSERT(bd->blocks > n);
    dbl_link_remove(bd, &free_list[ln]);
    fg = bd + bd->blocks - n; // take n blocks off the end
    fg->blocks = n;
    bd->blocks -= n;
    setup_tail(bd);
    ln = log_2(bd->blocks);
    dbl_link_onto(bd, &free_list[ln]);
    return fg;
}

/* Only initializes the start pointers on the first megablock and the
 * blocks field of the first bdescr; callers are responsible for calling
 * initGroup afterwards.
 */
static bdescr *
alloc_mega_group (StgWord mblocks)
{
    bdescr *best, *bd, *prev;
    StgWord n;

    n = MBLOCK_GROUP_BLOCKS(mblocks);

    best = NULL;
    prev = NULL;
    for (bd = free_mblock_list; bd != NULL; prev = bd, bd = bd->link)
    {
        if (bd->blocks == n)
        {
            if (prev) {
                prev->link = bd->link;
            } else {
                free_mblock_list = bd->link;
            }
            return bd;
        }
        else if (bd->blocks > n)
        {
            if (!best || bd->blocks < best->blocks)
            {
                best = bd;
            }
        }
    }

    if (best)
    {
        // we take our chunk off the end here.
        StgWord best_mblocks = BLOCKS_TO_MBLOCKS(best->blocks);
        bd = FIRST_BDESCR((StgWord8*)MBLOCK_ROUND_DOWN(best) +
                          (best_mblocks-mblocks)*MBLOCK_SIZE);

        best->blocks = MBLOCK_GROUP_BLOCKS(best_mblocks - mblocks);
        initMBlock(MBLOCK_ROUND_DOWN(bd));
    }
    else
    {
        void *mblock = getMBlocks(mblocks);
        initMBlock(mblock); // only need to init the 1st one
        bd = FIRST_BDESCR(mblock);
    }
    bd->blocks = MBLOCK_GROUP_BLOCKS(mblocks);
    return bd;
}

bdescr *
allocGroup (W_ n)
{
    bdescr *bd, *rem;
    StgWord ln;

    if (n == 0) barf("allocGroup: requested zero blocks");

    if (n >= BLOCKS_PER_MBLOCK)
    {
        StgWord mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(n);

        // n_alloc_blocks doesn't count the extra blocks we get in a
        // megablock group.
        n_alloc_blocks += mblocks * BLOCKS_PER_MBLOCK;
        if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

        bd = alloc_mega_group(mblocks);
        // only the bdescrs of the first MB are required to be initialised
        initGroup(bd);
        goto finish;
    }

    n_alloc_blocks += n;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    ln = log_2_ceil(n);

    while (ln < MAX_FREE_LIST && free_list[ln] == NULL) {
        ln++;
    }

    if (ln == MAX_FREE_LIST) {
#if 0 /* useful for debugging fragmentation */
        if ((W_)mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W
             - (W_)((n_alloc_blocks - n) * BLOCK_SIZE_W) > (2*1024*1024)/sizeof(W_)) {
            debugBelch("Fragmentation, wanted %d blocks, %ld MB free\n", n, ((mblocks_allocated * BLOCKS_PER_MBLOCK) - n_alloc_blocks) / BLOCKS_PER_MBLOCK);
            RtsFlags.DebugFlags.block_alloc = 1;
            checkFreeListSanity();
        }
#endif

        bd = alloc_mega_group(1);
        bd->blocks = n;
        initGroup(bd);                    // we know the group will fit
        rem = bd + n;
        rem->blocks = BLOCKS_PER_MBLOCK-n;
        initGroup(rem);                   // init the slop
        n_alloc_blocks += rem->blocks;
        freeGroup(rem);                   // add the slop on to the free list
        goto finish;
    }

    bd = free_list[ln];

    if (bd->blocks == n)                  // exactly the right size!
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else if (bd->blocks > n)              // block too big...
    {
        bd = split_free_block(bd, n, ln);
        ASSERT(bd->blocks == n);
        initGroup(bd);
    }
    else
    {
        barf("allocGroup: free list corrupted");
    }

finish:
    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}

//
// Allocate a chunk of blocks that is at least min and at most max
// blocks in size.  This API is used by the nursery allocator, which
// prefers contiguous memory but doesn't require it.  When memory is
// fragmented we might have lots of chunks that are less than a full
// megablock, so allowing the nursery allocator to use these reduces
// fragmentation considerably.  e.g. on a GHC build with +RTS -H, I saw
// fragmentation go from 17MB down to 3MB on a single compile.
//
// Further to this: in #7257 there is a program that creates serious
// fragmentation such that the heap is full of tiny <4 block chains.
// The nursery allocator therefore has to use single blocks to avoid
// fragmentation, but we make sure that we allocate large blocks
// preferentially if there are any.
//
bdescr *
allocLargeChunk (W_ min, W_ max)
{
    bdescr *bd;
    StgWord ln, lnmax;

    if (min >= BLOCKS_PER_MBLOCK) {
        return allocGroup(max);
    }

    ln = log_2_ceil(min);
    lnmax = log_2_ceil(max); // tops out at MAX_FREE_LIST

    while (ln < lnmax && free_list[ln] == NULL) {
        ln++;
    }
    if (ln == lnmax) {
        return allocGroup(max);
    }
    bd = free_list[ln];

    if (bd->blocks <= max)            // not too big: take the whole group
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else                              // block too big...
    {
        bd = split_free_block(bd, max, ln);
        ASSERT(bd->blocks == max);
        initGroup(bd);
    }

    n_alloc_blocks += bd->blocks;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}
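
/* For instance, a caller that would like a 128-block nursery chunk but
 * can make do with 16 blocks might write (an illustrative sketch; the
 * wrapper function is hypothetical, allocLargeChunk is the API above):
 */
#if 0
static void
example_nursery_chunk (void)
{
    bdescr *chunk = allocLargeChunk(16, 128);
    // chunk->blocks is now somewhere in [16,128]: exactly 128 if we had
    // to fall back to allocGroup(max), or whatever smaller group
    // (>= 16 blocks) the free lists could supply.
}
#endif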

bdescr *
allocGroup_lock(W_ n)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocGroup(n);
    RELEASE_SM_LOCK;
    return bd;
}

bdescr *
allocBlock(void)
{
    return allocGroup(1);
}

bdescr *
allocBlock_lock(void)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocBlock();
    RELEASE_SM_LOCK;
    return bd;
}
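
/* Typical use of the allocation API (an illustrative sketch, assuming
 * the caller does not already hold sm_mutex):
 */
#if 0
static void
example_alloc_free (void)
{
    // allocate a group of 4 contiguous blocks, taking sm_mutex
    bdescr *bd = allocGroup_lock(4);
    ASSERT(bd->blocks == 4);
    ASSERT(bd->free == bd->start); // initGroup() set the allocation pointer

    // ... fill the group, advancing bd->free ...

    freeGroup_lock(bd);            // return it, coalescing with free neighbours
}
#endif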

/* -----------------------------------------------------------------------------
   De-Allocation
   -------------------------------------------------------------------------- */

STATIC_INLINE bdescr *
coalesce_mblocks (bdescr *p)
{
    bdescr *q;

    q = p->link;
    if (q != NULL &&
        MBLOCK_ROUND_DOWN(q) ==
        (StgWord8*)MBLOCK_ROUND_DOWN(p) +
        BLOCKS_TO_MBLOCKS(p->blocks) * MBLOCK_SIZE) {
        // can coalesce
        p->blocks = MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(p->blocks) +
                                        BLOCKS_TO_MBLOCKS(q->blocks));
        p->link = q->link;
        return p;
    }
    return q;
}

static void
free_mega_group (bdescr *mg)
{
    bdescr *bd, *prev;

    // Find the right place in the free list.  free_mblock_list is
    // sorted by *address*, not by size as the free_list is.
    prev = NULL;
    bd = free_mblock_list;
    while (bd && bd->start < mg->start) {
        prev = bd;
        bd = bd->link;
    }

    // coalesce backwards
    if (prev)
    {
        mg->link = prev->link;
        prev->link = mg;
        mg = coalesce_mblocks(prev);
    }
    else
    {
        mg->link = free_mblock_list;
        free_mblock_list = mg;
    }
    // coalesce forwards
    coalesce_mblocks(mg);

    IF_DEBUG(sanity, checkFreeListSanity());
}


void
freeGroup(bdescr *p)
{
    StgWord ln;

    // ToDo: not true in multithreaded GC
    // ASSERT_SM_LOCK();

    ASSERT(p->free != (P_)-1);

    p->free = (void *)-1;  /* indicates that this block is free */
    p->gen = NULL;
    p->gen_no = 0;
    /* fill the block group with garbage if sanity checking is on */
    IF_DEBUG(sanity, memset(p->start, 0xaa, (W_)p->blocks * BLOCK_SIZE));

    if (p->blocks == 0) barf("freeGroup: block size is zero");

    if (p->blocks >= BLOCKS_PER_MBLOCK)
    {
        StgWord mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(p->blocks);
        // If this is an mgroup, make sure it has the right number of blocks
        ASSERT(p->blocks == MBLOCK_GROUP_BLOCKS(mblocks));

        n_alloc_blocks -= mblocks * BLOCKS_PER_MBLOCK;

        free_mega_group(p);
        return;
    }

    ASSERT(n_alloc_blocks >= p->blocks);
    n_alloc_blocks -= p->blocks;

    // coalesce forwards
    {
        bdescr *next;
        next = p + p->blocks;
        if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(p)) && next->free == (P_)-1)
        {
            p->blocks += next->blocks;
            ln = log_2(next->blocks);
            dbl_link_remove(next, &free_list[ln]);
            if (p->blocks == BLOCKS_PER_MBLOCK)
            {
                free_mega_group(p);
                return;
            }
            setup_tail(p);
        }
    }

    // coalesce backwards
    if (p != FIRST_BDESCR(MBLOCK_ROUND_DOWN(p)))
    {
        bdescr *prev;
        prev = p - 1;
        if (prev->blocks == 0) prev = prev->link; // find the head

        if (prev->free == (P_)-1)
        {
            ln = log_2(prev->blocks);
            dbl_link_remove(prev, &free_list[ln]);
            prev->blocks += p->blocks;
            if (prev->blocks >= BLOCKS_PER_MBLOCK)
            {
                free_mega_group(prev);
                return;
            }
            p = prev;
        }
    }

    setup_tail(p);
    free_list_insert(p);

    IF_DEBUG(sanity, checkFreeListSanity());
}

void
freeGroup_lock(bdescr *p)
{
    ACQUIRE_SM_LOCK;
    freeGroup(p);
    RELEASE_SM_LOCK;
}

void
freeChain(bdescr *bd)
{
    bdescr *next_bd;
    while (bd != NULL) {
        next_bd = bd->link;
        freeGroup(bd);
        bd = next_bd;
    }
}

void
freeChain_lock(bdescr *bd)
{
    ACQUIRE_SM_LOCK;
    freeChain(bd);
    RELEASE_SM_LOCK;
}

static void
initMBlock(void *mblock)
{
    bdescr *bd;
    StgWord8 *block;

    /* the first few Bdescr's in an mblock are unused, so we don't want to
     * put them all on the free list.
     */
    block = FIRST_BLOCK(mblock);
    bd    = FIRST_BDESCR(mblock);

    /* Initialise the start field of each block descriptor
     */
    for (; block <= (StgWord8*)LAST_BLOCK(mblock); bd += 1,
             block += BLOCK_SIZE) {
        bd->start = (void*)block;
    }
}
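
/* After initMBlock(), the Bdescr() macro round-trips for every usable
 * block in the mblock (an illustrative sketch of the layout, not part
 * of the allocator; the function name is hypothetical):
 */
#if 0
static void
example_bdescr_mapping (void *mblock)
{
    StgWord8 *block = FIRST_BLOCK(mblock);
    bdescr   *bd    = Bdescr((StgPtr)block);

    ASSERT(bd == FIRST_BDESCR(mblock)); // descriptors live at the front
    ASSERT(bd->start == (void *)block); // and point back at their blocks
}
#endif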

/* -----------------------------------------------------------------------------
   Stats / metrics
   -------------------------------------------------------------------------- */

W_
countBlocks(bdescr *bd)
{
    W_ n;
    for (n=0; bd != NULL; bd=bd->link) {
        n += bd->blocks;
    }
    return n;
}

// (*1) Just like countBlocks, except that we adjust the count for a
// megablock group so that it doesn't include the extra few blocks
// that would be taken up by block descriptors in the second and
// subsequent megablock.  This is so we can tally the count with the
// number of blocks allocated in the system, for memInventory().
W_
countAllocdBlocks(bdescr *bd)
{
    W_ n;
    for (n=0; bd != NULL; bd=bd->link) {
        n += bd->blocks;
        // hack for megablock groups: see (*1) above
        if (bd->blocks > BLOCKS_PER_MBLOCK) {
            n -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
                * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
        }
    }
    return n;
}
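
/* A worked example of the adjustment (illustrative, assuming the usual
 * 64-bit constants MBLOCK_SIZE/BLOCK_SIZE = 256 and BLOCKS_PER_MBLOCK = 252):
 * a 3-mblock group has bd->blocks = MBLOCK_GROUP_BLOCKS(3)
 * = 252 + 2*256 = 764.  The correction subtracts
 * (256 - 252) * (764/256) = 4 * 2 = 8 blocks, giving 756 = 3 * 252,
 * i.e. exactly BLOCKS_PER_MBLOCK per megablock, which is what
 * memInventory() expects.
 */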

void returnMemoryToOS(nat n /* megablocks */)
{
    bdescr *bd;
    StgWord size;

    bd = free_mblock_list;
    while ((n > 0) && (bd != NULL)) {
        size = BLOCKS_TO_MBLOCKS(bd->blocks);
        if (size > n) {
            StgWord newSize = size - n;
            char *freeAddr = MBLOCK_ROUND_DOWN(bd->start);
            freeAddr += newSize * MBLOCK_SIZE;
            bd->blocks = MBLOCK_GROUP_BLOCKS(newSize);
            freeMBlocks(freeAddr, n);
            n = 0;
        }
        else {
            char *freeAddr = MBLOCK_ROUND_DOWN(bd->start);
            n -= size;
            bd = bd->link;
            freeMBlocks(freeAddr, size);
        }
    }
    free_mblock_list = bd;

    osReleaseFreeMemory();

    IF_DEBUG(gc,
        if (n != 0) {
            debugBelch("Wanted to free %d more MBlocks than are freeable\n",
                       n);
        }
    );
}

/* -----------------------------------------------------------------------------
   Debugging
   -------------------------------------------------------------------------- */

#ifdef DEBUG
static void
check_tail (bdescr *bd)
{
    bdescr *tail = tail_of(bd);

    if (tail != bd)
    {
        ASSERT(tail->blocks == 0);
        ASSERT(tail->free == 0);
        ASSERT(tail->link == bd);
    }
}

void
checkFreeListSanity(void)
{
    bdescr *bd, *prev;
    StgWord ln, min;

    min = 1;
    for (ln = 0; ln < MAX_FREE_LIST; ln++) {
        IF_DEBUG(block_alloc,
                 debugBelch("free block list [%" FMT_Word "]:\n", ln));

        prev = NULL;
        for (bd = free_list[ln]; bd != NULL; prev = bd, bd = bd->link)
        {
            IF_DEBUG(block_alloc,
                     debugBelch("group at %p, length %ld blocks\n",
                                bd->start, (long)bd->blocks));
            ASSERT(bd->free == (P_)-1);
            ASSERT(bd->blocks > 0 && bd->blocks < BLOCKS_PER_MBLOCK);
            ASSERT(bd->blocks >= min && bd->blocks <= (min*2 - 1));
            ASSERT(bd->link != bd); // catch easy loops

            check_tail(bd);

            if (prev)
                ASSERT(bd->u.back == prev);
            else
                ASSERT(bd->u.back == NULL);

            {
                bdescr *next;
                next = bd + bd->blocks;
                if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(bd)))
                {
                    ASSERT(next->free != (P_)-1);
                }
            }
        }
        min = min << 1;
    }

    prev = NULL;
    for (bd = free_mblock_list; bd != NULL; prev = bd, bd = bd->link)
    {
        IF_DEBUG(block_alloc,
                 debugBelch("mega group at %p, length %ld blocks\n",
                            bd->start, (long)bd->blocks));

        ASSERT(bd->link != bd); // catch easy loops

        if (bd->link != NULL)
        {
            // make sure the list is sorted
            ASSERT(bd->start < bd->link->start);
        }

        ASSERT(bd->blocks >= BLOCKS_PER_MBLOCK);
        ASSERT(MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(bd->blocks))
               == bd->blocks);

        // make sure we're fully coalesced
        if (bd->link != NULL)
        {
            ASSERT (MBLOCK_ROUND_DOWN(bd->link) !=
                    (StgWord8*)MBLOCK_ROUND_DOWN(bd) +
                    BLOCKS_TO_MBLOCKS(bd->blocks) * MBLOCK_SIZE);
        }
    }
}

W_ /* BLOCKS */
countFreeList(void)
{
    bdescr *bd;
    W_ total_blocks = 0;
    StgWord ln;

    for (ln=0; ln < MAX_FREE_LIST; ln++) {
        for (bd = free_list[ln]; bd != NULL; bd = bd->link) {
            total_blocks += bd->blocks;
        }
    }
    for (bd = free_mblock_list; bd != NULL; bd = bd->link) {
        total_blocks += BLOCKS_PER_MBLOCK * BLOCKS_TO_MBLOCKS(bd->blocks);
        // The caller of this function, memInventory(), expects to match
        // the total number of blocks in the system against mblocks *
        // BLOCKS_PER_MBLOCK, so we must subtract the space for the
        // block descriptors from *every* mblock.
    }
    return total_blocks;
}

void
markBlocks (bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        bd->flags |= BF_KNOWN;
    }
}

void
reportUnmarkedBlocks (void)
{
    void *mblock;
    bdescr *bd;

    debugBelch("Unreachable blocks:\n");
    for (mblock = getFirstMBlock(); mblock != NULL;
         mblock = getNextMBlock(mblock)) {
        for (bd = FIRST_BDESCR(mblock); bd <= LAST_BDESCR(mblock); ) {
            if (!(bd->flags & BF_KNOWN) && bd->free != (P_)-1) {
                debugBelch("  %p\n", bd);
            }
            if (bd->blocks >= BLOCKS_PER_MBLOCK) {
                mblock = (StgWord8*)mblock +
                    (BLOCKS_TO_MBLOCKS(bd->blocks) - 1) * MBLOCK_SIZE;
                break;
            } else {
                bd += bd->blocks;
            }
        }
    }
}

#endif