copyright updates and add Commentary links
[ghc.git] / rts / sm / Compact.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 2001-2006
4 *
5 * Compacting garbage collector
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16 #include "RtsUtils.h"
17 #include "RtsFlags.h"
18 #include "OSThreads.h"
19 #include "Storage.h"
20 #include "Stable.h"
21 #include "BlockAlloc.h"
22 #include "MBlock.h"
23 #include "GC.h"
24 #include "Compact.h"
25 #include "Schedule.h"
26 #include "Apply.h"
27 #include "Trace.h"
28
29 // Turn off inlining when debugging - it obfuscates things
30 #ifdef DEBUG
31 # undef STATIC_INLINE
32 # define STATIC_INLINE static
33 #endif
34
35 /* -----------------------------------------------------------------------------
36 Threading / unthreading pointers.
37
38 The basic idea here is to chain together all the fields pointing at
39 a particular object, with the root of the chain in the object's
40 info table field. The original contents of the info pointer goes
41 at the end of the chain.
42
43 Adding a new field to the chain is a matter of swapping the
44 contents of the field with the contents of the object's info table
45 field.
46
47 To unthread the chain, we walk down it updating all the fields on
48 the chain with the new location of the object. We stop when we
49 reach the info pointer at the end.
50
51 We use a trick to identify the info pointer: when swapping pointers
52 for threading, we set the low bit of the original pointer, with the
53 result that all the pointers in the chain have their low bits set
54 except for the info pointer.
55 -------------------------------------------------------------------------- */
56
// Thread a single pointer field: add the field at p to the chain of
// fields pointing at the object *p references, provided that object
// lives in the compacted (non-evacuated) area.  The field's old
// contents are swapped into the object's info-table word, and the
// object's header is made to point back at the field with its low
// bit set, so unthread() can later distinguish chain links (low bit
// set) from the genuine info pointer (low bit clear).
STATIC_INLINE void
thread (StgClosure **p)
{
    StgPtr q = *(StgPtr *)p;
    bdescr *bd;

    // It doesn't look like a closure at the moment, because the info
    // ptr is possibly threaded:
    // ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    if (HEAP_ALLOCED(q)) {
        bd = Bdescr(q);
        // a handy way to discover whether the ptr is into the
        // compacted area of the old gen, is that the EVACUATED flag
        // is zero (it's non-zero for all the other areas of live
        // memory).
        if ((bd->flags & BF_EVACUATED) == 0) {
            // NB: the order matters here — the field must receive the
            // object's current header word before the header is
            // overwritten with the (tagged) back-pointer.
            *(StgPtr)p = (StgWord)*q;
            *q = (StgWord)p + 1;	// set the low bit
        }
    }
}
80
81 // This version of thread() takes a (void *), used to circumvent
82 // warnings from gcc about pointer punning and strict aliasing.
83 STATIC_INLINE void thread_ (void *p) { thread((StgClosure **)p); }
84
85 STATIC_INLINE void
86 unthread( StgPtr p, StgPtr free )
87 {
88 StgWord q = *p, r;
89
90 while ((q & 1) != 0) {
91 q -= 1; // unset the low bit again
92 r = *((StgPtr)q);
93 *((StgPtr)q) = (StgWord)free;
94 q = r;
95 }
96 *p = q;
97 }
98
99 STATIC_INLINE StgInfoTable *
100 get_threaded_info( StgPtr p )
101 {
102 StgPtr q = (P_)GET_INFO((StgClosure *)p);
103
104 while (((StgWord)q & 1) != 0) {
105 q = (P_)*((StgPtr)((StgWord)q-1));
106 }
107
108 ASSERT(LOOKS_LIKE_INFO_PTR(q));
109 return INFO_PTR_TO_STRUCT((StgInfoTable *)q);
110 }
111
112 // A word-aligned memmove will be faster for small objects than libc's or gcc's.
113 // Remember, the two regions *might* overlap, but: to <= from.
114 STATIC_INLINE void
115 move(StgPtr to, StgPtr from, nat size)
116 {
117 for(; size > 0; --size) {
118 *to++ = *from++;
119 }
120 }
121
// Thread the pointer fields of every static object on the given
// list (static closures reached during GC are chained together via
// their static-link fields).  Only IND_STATIC has a pointer field
// worth threading; the other static closure kinds are simply stepped
// over via their respective link macros.
static void
thread_static( StgClosure* p )
{
    const StgInfoTable *info;

    // keep going until we've threaded all the objects on the linked
    // list...
    while (p != END_OF_STATIC_LIST) {

        info = get_itbl(p);
        switch (info->type) {

        case IND_STATIC:
            thread(&((StgInd *)p)->indirectee);
            p = *IND_STATIC_LINK(p);
            continue;

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK(p);
            continue;
        case FUN_STATIC:
            p = *FUN_STATIC_LINK(p);
            continue;
        case CONSTR_STATIC:
            p = *STATIC_LINK(info,p);
            continue;

        default:
            barf("thread_static: strange closure %d", (int)(info->type));
        }

    }
}
155
156 STATIC_INLINE void
157 thread_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
158 {
159 nat i, b;
160 StgWord bitmap;
161
162 b = 0;
163 bitmap = large_bitmap->bitmap[b];
164 for (i = 0; i < size; ) {
165 if ((bitmap & 1) == 0) {
166 thread((StgClosure **)p);
167 }
168 i++;
169 p++;
170 if (i % BITS_IN(W_) == 0) {
171 b++;
172 bitmap = large_bitmap->bitmap[b];
173 } else {
174 bitmap = bitmap >> 1;
175 }
176 }
177 }
178
// Thread the arguments of a function application record, using the
// function's argument bitmap to find the pointer words (a clear bit
// marks a pointer).  Returns the address just past the argument
// block.
STATIC_INLINE StgPtr
thread_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        // general case: the bitmap lives in the function's info table
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        // bitmap too big for one word: stored out of line
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        thread_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        // one of the standard argument patterns: the bitmap comes
        // from a fixed table indexed by the pattern
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                // clear bit == pointer word
                thread((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
213
// Thread every pointer on a stack between p and stack_end.  The stack
// is a sequence of activation records, each beginning with an info
// pointer whose layout info tells us where the pointers are.
static void
thread_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    // highly similar to scavenge_stack, but we do pointer threading here.

    while (p < stack_end) {

        // *p must be the info pointer of an activation
        // record. All activation records have 'bitmap' style layout
        // info.
        //
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

            // Dynamic bitmap: the mask is stored on the stack
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            while (size > 0) {
                if ((bitmap & 1) == 0) {
                    thread((StgClosure **)p);
                }
                p++;
                bitmap = bitmap >> 1;
                size--;
            }

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                thread((StgClosure **)p);
                p++;
            }
            continue;
        }

            // small bitmap (<= 32 entries, or 64 on a 64-bit machine)
        case CATCH_RETRY_FRAME:
        case CATCH_STM_FRAME:
        case ATOMICALLY_FRAME:
        case UPDATE_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
        case RET_VEC_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            p++;
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            while (size > 0) {
                if ((bitmap & 1) == 0) {
                    thread((StgClosure **)p);
                }
                p++;
                bitmap = bitmap >> 1;
                size--;
            }
            continue;

        case RET_BCO: {
            StgBCO *bco;
            nat size;

            // the frame contains a pointer to the BCO, whose bitmap
            // describes the rest of the frame's payload
            p++;
            bco = (StgBCO *)*p;
            thread((StgClosure **)p);
            p++;
            size = BCO_BITMAP_SIZE(bco);
            thread_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }

            // large bitmap (> 32 entries, or 64 on a 64-bit machine)
        case RET_BIG:
        case RET_VEC_BIG:
            p++;
            size = GET_LARGE_BITMAP(&info->i)->size;
            thread_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            continue;

        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            // fetch the fun's info table via the threading chain
            fun_info = itbl_to_fun_itbl(
                get_threaded_info((StgPtr)ret_fun->fun));
                 // *before* threading it!
            thread(&ret_fun->fun);
            p = thread_arg_block(fun_info, ret_fun->payload);
            continue;
        }

        default:
            barf("thread_stack: weird activation record found on stack: %d",
                 (int)(info->i.type));
        }
    }
}
329
// Thread the payload of a PAP or AP: `size` words of arguments whose
// pointerhood is determined by the function's argument bitmap (clear
// bit == pointer).  The fun's info table is fetched via the threading
// chain, since fun's header may already be threaded.  Returns the
// address just past the payload.
STATIC_INLINE StgPtr
thread_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = itbl_to_fun_itbl(get_threaded_info((StgPtr)fun));
    ASSERT(fun_info->i.type != PAP);

    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        thread_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        // for a BCO, the bitmap comes from the BCO itself (fun)
        thread_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                thread((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }

    return p;
}
370
371 STATIC_INLINE StgPtr
372 thread_PAP (StgPAP *pap)
373 {
374 StgPtr p;
375 p = thread_PAP_payload(pap->fun, pap->payload, pap->n_args);
376 thread(&pap->fun);
377 return p;
378 }
379
380 STATIC_INLINE StgPtr
381 thread_AP (StgAP *ap)
382 {
383 StgPtr p;
384 p = thread_PAP_payload(ap->fun, ap->payload, ap->n_args);
385 thread(&ap->fun);
386 return p;
387 }
388
389 STATIC_INLINE StgPtr
390 thread_AP_STACK (StgAP_STACK *ap)
391 {
392 thread(&ap->fun);
393 thread_stack((P_)ap->payload, (P_)ap->payload + ap->size);
394 return (P_)ap + sizeofW(StgAP_STACK) + ap->size;
395 }
396
// Thread all the pointer fields of a TSO, including everything on its
// stack.  Returns the address just past the TSO.
static StgPtr
thread_TSO (StgTSO *tso)
{
    thread_(&tso->link);
    thread_(&tso->global_link);

    // block_info.closure only holds a closure pointer for these
    // blocking reasons; for others it is not a pointer, so don't
    // thread it.
    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
        ) {
        thread_(&tso->block_info.closure);
    }
    thread_(&tso->blocked_exceptions);

    thread_(&tso->trec);

    // thread the live part of the stack (from sp to the top)
    thread_stack(tso->sp, &(tso->stack[tso->stack_size]));
    return (StgPtr)tso + tso_sizeW(tso);
}
416
417
// Thread the pointer fields of each large object on the given block
// chain (one object per block group).  Large objects don't move
// during compaction, but their fields may point into the compacted
// area and so must be threaded.
static void
update_fwd_large( bdescr *bd )
{
    StgPtr p;
    const StgInfoTable* info;

    for (; bd != NULL; bd = bd->link) {

        p = bd->start;
        info  = get_itbl((StgClosure *)p);

        switch (info->type) {

        case ARR_WORDS:
            // nothing to follow
            continue;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                thread((StgClosure **)p);
            }
            continue;
        }

        case TSO:
            thread_TSO((StgTSO *)p);
            continue;

        case AP_STACK:
            thread_AP_STACK((StgAP_STACK *)p);
            continue;

        case PAP:
            thread_PAP((StgPAP *)p);
            continue;

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = (StgTRecChunk *)p;
            TRecEntry *e = &(tc -> entries[0]);
            thread_(&tc->prev_chunk);
            // thread each occupied transaction-record entry
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                thread_(&e->tvar);
                thread(&e->expected_value);
                thread(&e->new_value);
            }
            continue;
        }

        default:
            barf("update_fwd_large: unknown/strange object  %d", (int)(info->type));
        }
    }
}
481
// ToDo: too big to inline
// Thread all the pointer fields of a single heap object whose info
// table has already been recovered (it may be threaded in the heap),
// and return the address of the word just past the object.  This is
// the per-object workhorse of the forward passes: one case per
// closure layout.
static /* STATIC_INLINE */ StgPtr
thread_obj (StgInfoTable *info, StgPtr p)
{
    switch (info->type) {
    case THUNK_0_1:
        // 0 pointers, 1 non-pointer: nothing to thread
        return p + sizeofW(StgThunk) + 1;

    case FUN_0_1:
    case CONSTR_0_1:
        return p + sizeofW(StgHeader) + 1;

    case FUN_1_0:
    case CONSTR_1_0:
        thread(&((StgClosure *)p)->payload[0]);
        return p + sizeofW(StgHeader) + 1;

    case THUNK_1_0:
        thread(&((StgThunk *)p)->payload[0]);
        return p + sizeofW(StgThunk) + 1;

    case THUNK_0_2:
        return p + sizeofW(StgThunk) + 2;

    case FUN_0_2:
    case CONSTR_0_2:
        return p + sizeofW(StgHeader) + 2;

    case THUNK_1_1:
        thread(&((StgThunk *)p)->payload[0]);
        return p + sizeofW(StgThunk) + 2;

    case FUN_1_1:
    case CONSTR_1_1:
        thread(&((StgClosure *)p)->payload[0]);
        return p + sizeofW(StgHeader) + 2;

    case THUNK_2_0:
        thread(&((StgThunk *)p)->payload[0]);
        thread(&((StgThunk *)p)->payload[1]);
        return p + sizeofW(StgThunk) + 2;

    case FUN_2_0:
    case CONSTR_2_0:
        thread(&((StgClosure *)p)->payload[0]);
        thread(&((StgClosure *)p)->payload[1]);
        return p + sizeofW(StgHeader) + 2;

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        thread_(&bco->instrs);
        thread_(&bco->literals);
        thread_(&bco->ptrs);
        thread_(&bco->itbls);
        return p + bco_sizeW(bco);
    }

    case THUNK:
    {
        StgPtr end;

        // pointers first, then nptrs non-pointer words
        end = (P_)((StgThunk *)p)->payload +
            info->layout.payload.ptrs;
        for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
            thread((StgClosure **)p);
        }
        return p + info->layout.payload.nptrs;
    }

    case FUN:
    case CONSTR:
    case STABLE_NAME:
    case IND_PERM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
    {
        StgPtr end;

        // generic layout: ptrs pointers followed by nptrs non-pointers
        end = (P_)((StgClosure *)p)->payload +
            info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            thread((StgClosure **)p);
        }
        return p + info->layout.payload.nptrs;
    }

    case WEAK:
    {
        StgWeak *w = (StgWeak *)p;
        thread(&w->key);
        thread(&w->value);
        thread(&w->finalizer);
        if (w->link != NULL) {
            thread_(&w->link);
        }
        return p + sizeofW(StgWeak);
    }

    case MVAR:
    {
        StgMVar *mvar = (StgMVar *)p;
        thread_(&mvar->head);
        thread_(&mvar->tail);
        thread(&mvar->value);
        return p + sizeofW(StgMVar);
    }

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
        thread(&((StgInd *)p)->indirectee);
        return p + sizeofW(StgInd);

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        thread(&s->selectee);
        return p + THUNK_SELECTOR_sizeW();
    }

    case AP_STACK:
        return thread_AP_STACK((StgAP_STACK *)p);

    case PAP:
        return thread_PAP((StgPAP *)p);

    case AP:
        return thread_AP((StgAP *)p);

    case ARR_WORDS:
        // no pointers in the payload
        return p + arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // follow everything
    {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            thread((StgClosure **)p);
        }
        return p;
    }

    case TSO:
        return thread_TSO((StgTSO *)p);

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
        thread_(&wq->closure);
        thread_(&wq->next_queue_entry);
        thread_(&wq->prev_queue_entry);
        return p + sizeofW(StgTVarWatchQueue);
    }

    case TVAR:
    {
        StgTVar *tvar = (StgTVar *)p;
        thread((void *)&tvar->current_value);
        thread((void *)&tvar->first_watch_queue_entry);
        return p + sizeofW(StgTVar);
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = (StgTRecHeader *)p;
        thread_(&trec->enclosing_trec);
        thread_(&trec->current_chunk);
        thread_(&trec->invariants_to_check);
        return p + sizeofW(StgTRecHeader);
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        TRecEntry *e = &(tc -> entries[0]);
        thread_(&tc->prev_chunk);
        // thread each occupied transaction-record entry
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            thread_(&e->tvar);
            thread(&e->expected_value);
            thread(&e->new_value);
        }
        return p + sizeofW(StgTRecChunk);
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
        thread_(&invariant->code);
        thread_(&invariant->last_execution);
        return p + sizeofW(StgAtomicInvariant);
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = (StgInvariantCheckQueue *)p;
        thread_(&queue->invariant);
        thread_(&queue->my_execution);
        thread_(&queue->next_queue_entry);
        return p + sizeofW(StgInvariantCheckQueue);
    }

    default:
        barf("update_fwd: unknown/strange object  %d", (int)(info->type));
        return NULL;
    }
}
697
698 static void
699 update_fwd( bdescr *blocks )
700 {
701 StgPtr p;
702 bdescr *bd;
703 StgInfoTable *info;
704
705 bd = blocks;
706
707 // cycle through all the blocks in the step
708 for (; bd != NULL; bd = bd->link) {
709 p = bd->start;
710
711 // linearly scan the objects in this block
712 while (p < bd->free) {
713 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
714 info = get_itbl((StgClosure *)p);
715 p = thread_obj(info, p);
716 }
717 }
718 }
719
// Forward pass over the compacted area: for each marked (live)
// object, thread its fields and compute its destination address, then
// unthread its chain so every field that pointed at it now holds the
// new address.  `free` tracks where the next object will land during
// the backward (moving) pass.
static void
update_fwd_compact( bdescr *blocks )
{
    StgPtr p, q, free;
#if 0
    StgWord m;
#endif
    bdescr *bd, *free_bd;
    StgInfoTable *info;
    nat size;

    bd = blocks;
    free_bd = blocks;
    free = free_bd->start;

    // cycle through all the blocks in the step
    for (; bd != NULL; bd = bd->link) {
        p = bd->start;

        while (p < bd->free ) {

            // skip unmarked (dead) words to find the next live object
            while ( p < bd->free && !is_marked(p,bd) ) {
                p++;
            }
            if (p >= bd->free) {
                break;
            }

#if 0
    next:
        m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord))));
        m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1));

        while ( p < bd->free ) {

            if ((m & 1) == 0) {
                m >>= 1;
                p++;
                if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) {
                    goto next;
                } else {
                    continue;
                }
            }
#endif

            // Problem: we need to know the destination for this cell
            // in order to unthread its info pointer.  But we can't
            // know the destination without the size, because we may
            // spill into the next block.  So we have to run down the
            // threaded list and get the info ptr first.
            info = get_threaded_info(p);

            q = p;

            p = thread_obj(info, p);

            size = p - q;
            if (free + size > free_bd->start + BLOCK_SIZE_W) {
                // unset the next bit in the bitmap to indicate that
                // this object needs to be pushed into the next
                // block.  This saves us having to run down the
                // threaded info pointer list twice during the next pass.
                unmark(q+1,bd);
                free_bd = free_bd->link;
                free = free_bd->start;
            } else {
                ASSERT(is_marked(q+1,bd));
            }

            // resolve this object's chain: every field on it now gets
            // the object's final address
            unthread(q,free);
            free += size;
#if 0
            goto next;
#endif
        }
    }
}
798
// Backward pass over the compacted area: unthread each live object's
// remaining chain and slide the object down to its final address
// (computed to match the forward pass).  Frees the now-unused tail of
// the block chain and returns the number of blocks still in use.
static nat
update_bkwd_compact( step *stp )
{
    StgPtr p, free;
#if 0
    StgWord m;
#endif
    bdescr *bd, *free_bd;
    StgInfoTable *info;
    nat size, free_blocks;

    bd = free_bd = stp->old_blocks;
    free = free_bd->start;
    free_blocks = 1;

    // cycle through all the blocks in the step
    for (; bd != NULL; bd = bd->link) {
        p = bd->start;

        while (p < bd->free ) {

            // skip unmarked (dead) words to find the next live object
            while ( p < bd->free && !is_marked(p,bd) ) {
                p++;
            }
            if (p >= bd->free) {
                break;
            }

#if 0
    next:
        m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord))));
        m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1));

        while ( p < bd->free ) {

            if ((m & 1) == 0) {
                m >>= 1;
                p++;
                if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) {
                    goto next;
                } else {
                    continue;
                }
            }
#endif

            // the forward pass cleared the bit at q+1 to signal that
            // this object spills into the next destination block
            if (!is_marked(p+1,bd)) {
                // don't forget to update the free ptr in the block desc.
                free_bd->free = free;
                free_bd = free_bd->link;
                free = free_bd->start;
                free_blocks++;
            }

            unthread(p,free);
            ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)p)->header.info));
            info = get_itbl((StgClosure *)p);
            size = closure_sizeW_((StgClosure *)p,info);

            if (free != p) {
                move(free,p,size);
            }

            // relocate TSOs
            if (info->type == TSO) {
                move_TSO((StgTSO *)p, (StgTSO *)free);
            }

            free += size;
            p += size;
#if 0
            goto next;
#endif
        }
    }

    // free the remaining blocks and count what's left.
    free_bd->free = free;
    if (free_bd->link != NULL) {
        freeChain(free_bd->link);
        free_bd->link = NULL;
    }

    return free_blocks;
}
884
// Entry point for the compacting collector.  Phase 1 threads every
// root (GC roots, weak pointers, mutable lists, thread lists, tasks,
// static objects, stable pointers, CAFs); phase 2 threads the heap
// and resolves forward pointers; phase 3 slides the objects of the
// oldest generation down and fixes up the remaining pointers.
void
compact(void)
{
    nat g, s, blocks;
    step *stp;

    // 1. thread the roots
    GetRoots((evac_fn)thread);

    // the weak pointer lists...
    if (weak_ptr_list != NULL) {
        thread((void *)&weak_ptr_list);
    }
    if (old_weak_ptr_list != NULL) {
        thread((void *)&old_weak_ptr_list); // tmp
    }

    // mutable lists
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        bdescr *bd;
        StgPtr p;
        for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
            // mut_list blocks hold a flat array of closure pointers
            for (p = bd->start; p < bd->free; p++) {
                thread((StgClosure **)p);
            }
        }
    }

    // the global thread list
    thread((void *)&all_threads);

    // any threads resurrected during this GC
    thread((void *)&resurrected_threads);

    // the task list
    {
        Task *task;
        for (task = all_tasks; task != NULL; task = task->all_link) {
            if (task->tso) {
                thread_(&task->tso);
            }
        }
    }

    // the static objects
    thread_static(scavenged_static_objects);

    // the stable pointer table
    threadStablePtrTable((evac_fn)thread);

    // the CAF list (used by GHCi)
    markCAFs((evac_fn)thread);

    // 2. update forward ptrs
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            // gen 0 step 0 is the nursery: never compacted, skip it
            if (g==0 && s ==0) continue;
            stp = &generations[g].steps[s];
            debugTrace(DEBUG_gc, "update_fwd:  %d.%d",
                       stp->gen->no, stp->no);

            update_fwd(stp->blocks);
            update_fwd_large(stp->scavenged_large_objects);
            if (g == RtsFlags.GcFlags.generations-1 && stp->old_blocks != NULL) {
                debugTrace(DEBUG_gc, "update_fwd:  %d.%d (compact)",
                           stp->gen->no, stp->no);
                update_fwd_compact(stp->old_blocks);
            }
        }
    }

    // 3. update backward ptrs
    stp = &oldest_gen->steps[0];
    if (stp->old_blocks != NULL) {
        blocks = update_bkwd_compact(stp);
        debugTrace(DEBUG_gc,
                   "update_bkwd: %d.%d (compact, old: %d blocks, now %d blocks)",
                   stp->gen->no, stp->no,
                   stp->n_old_blocks, blocks);
        stp->n_old_blocks = blocks;
    }
}