/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 2001-2006
 *
 * Compacting garbage collector
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "OSThreads.h"
#include "BlockAlloc.h"
#include "MBlock.h"
#include "GC.h"
#include "Compact.h"
#include "Schedule.h"
#include "Apply.h"
#include "Trace.h"

// Turn off inlining when debugging - it obfuscates things
#ifdef DEBUG
# undef  STATIC_INLINE
# define STATIC_INLINE static
#endif

/* -----------------------------------------------------------------------------
   Threading / unthreading pointers.

   The basic idea here is to chain together all the fields pointing at
   a particular object, with the root of the chain in the object's
   info table field.  The original contents of the info pointer go
   at the end of the chain.

   Adding a new field to the chain is a matter of swapping the
   contents of the field with the contents of the object's info table
   field.

   To unthread the chain, we walk down it updating all the fields on
   the chain with the new location of the object.  We stop when we
   reach the info pointer at the end.

   We use a trick to identify the info pointer: when swapping pointers
   for threading, we set the low bit of the original pointer, with the
   result that all the pointers in the chain have their low bits set
   except for the info pointer.
   -------------------------------------------------------------------------- */
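
/* A small worked illustration (the names f1, f2 and O are hypothetical):

       before threading              after threading f1 then f2

       O.info = I                    O.info = f2|1
       f1     = O                    f1     = I
       f2     = O                    f2     = f1|1

   Unthreading O with new location O' walks O.info -> f2 -> f1, writing
   O' into each field, and stops at I (low bit clear), which it restores
   as O's info pointer. */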

STATIC_INLINE void
thread (StgClosure **p)
{
    StgPtr q = *(StgPtr *)p;
    bdescr *bd;

    // It doesn't look like a closure at the moment, because the info
    // ptr is possibly threaded:
    // ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    if (HEAP_ALLOCED(q)) {
        bd = Bdescr(q);
        // A handy way to discover whether the ptr points into the
        // compacted area of the old gen is that the BF_EVACUATED flag
        // is zero (it's non-zero for all the other areas of live
        // memory).
        if ((bd->flags & BF_EVACUATED) == 0) {

            *(StgPtr)p = (StgWord)*q;
            *q = (StgWord)p + 1;        // set the low bit
        }
    }
}

// This version of thread() takes a (void *), used to circumvent
// warnings from gcc about pointer punning and strict aliasing.
STATIC_INLINE void thread_ (void *p) { thread((StgClosure **)p); }

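// Walk the chain of threaded fields rooted at p, writing the object's
// new address 'free' into each field on the chain, and finally restore
// the original info pointer (the low-bit-clear word at the end).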
STATIC_INLINE void
unthread( StgPtr p, StgPtr free )
{
    StgWord q = *p, r;

    while ((q & 1) != 0) {
        q -= 1;     // unset the low bit again
        r = *((StgPtr)q);
        *((StgPtr)q) = (StgWord)free;
        q = r;
    }
    *p = q;
}

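// Follow a possibly-threaded info pointer to the end of its chain and
// return the object's real info table.  Every link in the chain has
// its low bit set; the info pointer at the end does not.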
STATIC_INLINE StgInfoTable *
get_threaded_info( StgPtr p )
{
    StgPtr q = (P_)GET_INFO((StgClosure *)p);

    while (((StgWord)q & 1) != 0) {
        q = (P_)*((StgPtr)((StgWord)q-1));
    }

    ASSERT(LOOKS_LIKE_INFO_PTR(q));
    return INFO_PTR_TO_STRUCT((StgInfoTable *)q);
}

// A word-aligned memmove will be faster for small objects than libc's
// or gcc's.  Remember, the two regions *might* overlap, but we always
// have to <= from, so a forwards word-by-word copy is safe.
STATIC_INLINE void
move(StgPtr to, StgPtr from, nat size)
{
    for(; size > 0; --size) {
        *to++ = *from++;
    }
}

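// Walk the list of static objects, threading the IND_STATIC
// indirectees and following each object's static link field to the
// next object on the list.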
static void
thread_static( StgClosure* p )
{
    const StgInfoTable *info;

    // keep going until we've threaded all the objects on the linked
    // list...
    while (p != END_OF_STATIC_LIST) {

        info = get_itbl(p);
        switch (info->type) {

        case IND_STATIC:
            thread(&((StgInd *)p)->indirectee);
            p = *IND_STATIC_LINK(p);
            continue;

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK(p);
            continue;
        case FUN_STATIC:
            p = *FUN_STATIC_LINK(p);
            continue;
        case CONSTR_STATIC:
            p = *STATIC_LINK(info,p);
            continue;

        default:
            barf("thread_static: strange closure %d", (int)(info->type));
        }

    }
}

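// Thread the fields of an object described by a large bitmap.  A clear
// bit means the corresponding word is a pointer to be threaded; a set
// bit means a non-pointer word.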
STATIC_INLINE void
thread_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            thread((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}

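// Thread the arguments in a function argument block, using the
// function's info table to find the argument bitmap.  Returns a
// pointer to the word just past the arguments.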
STATIC_INLINE StgPtr
thread_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        thread_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                thread((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}

static void
thread_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    // highly similar to scavenge_stack, but we do pointer threading here.

    while (p < stack_end) {

        // *p must be the info pointer of an activation
        // record.  All activation records have 'bitmap' style layout
        // info.
        //
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

            // Dynamic bitmap: the mask is stored on the stack
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            while (size > 0) {
                if ((bitmap & 1) == 0) {
                    thread((StgClosure **)p);
                }
                p++;
                bitmap = bitmap >> 1;
                size--;
            }

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                thread((StgClosure **)p);
                p++;
            }
            continue;
        }

            // small bitmap (<= 32 entries, or 64 on a 64-bit machine)
        case CATCH_RETRY_FRAME:
        case CATCH_STM_FRAME:
        case ATOMICALLY_FRAME:
        case UPDATE_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
        case RET_VEC_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            p++;
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            while (size > 0) {
                if ((bitmap & 1) == 0) {
                    thread((StgClosure **)p);
                }
                p++;
                bitmap = bitmap >> 1;
                size--;
            }
            continue;

        case RET_BCO: {
            StgBCO *bco;
            nat size;

            p++;
            bco = (StgBCO *)*p;
            thread((StgClosure **)p);
            p++;
            size = BCO_BITMAP_SIZE(bco);
            thread_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }

            // large bitmap (> 32 entries, or 64 on a 64-bit machine)
        case RET_BIG:
        case RET_VEC_BIG:
            p++;
            size = GET_LARGE_BITMAP(&info->i)->size;
            thread_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            continue;

        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            fun_info = itbl_to_fun_itbl(
                get_threaded_info((StgPtr)ret_fun->fun));
                 // *before* threading it!
            thread(&ret_fun->fun);
            p = thread_arg_block(fun_info, ret_fun->payload);
            continue;
        }

        default:
            barf("thread_stack: weird activation record found on stack: %d",
                 (int)(info->i.type));
        }
    }
}

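// Thread the payload of a PAP or AP.  The argument layout comes from
// the function's info table, which must be fetched with
// get_threaded_info since the fun closure's info pointer may itself be
// threaded.  Returns a pointer past the end of the payload.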
STATIC_INLINE StgPtr
thread_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = itbl_to_fun_itbl(get_threaded_info((StgPtr)fun));
    ASSERT(fun_info->i.type != PAP);

    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        thread_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        thread_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                thread((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }

    return p;
}

STATIC_INLINE StgPtr
thread_PAP (StgPAP *pap)
{
    StgPtr p;
    p = thread_PAP_payload(pap->fun, pap->payload, pap->n_args);
    thread(&pap->fun);
    return p;
}

STATIC_INLINE StgPtr
thread_AP (StgAP *ap)
{
    StgPtr p;
    p = thread_PAP_payload(ap->fun, ap->payload, ap->n_args);
    thread(&ap->fun);
    return p;
}

STATIC_INLINE StgPtr
thread_AP_STACK (StgAP_STACK *ap)
{
    thread(&ap->fun);
    thread_stack((P_)ap->payload, (P_)ap->payload + ap->size);
    return (P_)ap + sizeofW(StgAP_STACK) + ap->size;
}

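// Thread the pointer fields of a TSO, including its stack.  Returns a
// pointer past the end of the TSO.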
static StgPtr
thread_TSO (StgTSO *tso)
{
    thread_(&tso->link);
    thread_(&tso->global_link);

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
        ) {
        thread_(&tso->block_info.closure);
    }
    thread_(&tso->blocked_exceptions);

    thread_(&tso->trec);

    thread_stack(tso->sp, &(tso->stack[tso->stack_size]));
    return (StgPtr)tso + tso_sizeW(tso);
}


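// Thread the pointers inside each object on a large-object list.  Only
// the fields are threaded; the objects themselves stay put during
// compaction.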
static void
update_fwd_large( bdescr *bd )
{
    StgPtr p;
    const StgInfoTable* info;

    for (; bd != NULL; bd = bd->link) {

        p = bd->start;
        info = get_itbl((StgClosure *)p);

        switch (info->type) {

        case ARR_WORDS:
            // nothing to follow
            continue;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                thread((StgClosure **)p);
            }
            continue;
        }

        case TSO:
            thread_TSO((StgTSO *)p);
            continue;

        case AP_STACK:
            thread_AP_STACK((StgAP_STACK *)p);
            continue;

        case PAP:
            thread_PAP((StgPAP *)p);
            continue;

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = (StgTRecChunk *)p;
            TRecEntry *e = &(tc -> entries[0]);
            thread_(&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                thread_(&e->tvar);
                thread(&e->expected_value);
                thread(&e->new_value);
            }
            continue;
        }

        default:
            barf("update_fwd_large: unknown/strange object %d", (int)(info->type));
        }
    }
}

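// Thread every pointer field of the closure at p, given its info
// table.  Returns a pointer to the word just past the end of the
// closure.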
// ToDo: too big to inline
static /* STATIC_INLINE */ StgPtr
thread_obj (StgInfoTable *info, StgPtr p)
{
    switch (info->type) {
    case THUNK_0_1:
        return p + sizeofW(StgThunk) + 1;

    case FUN_0_1:
    case CONSTR_0_1:
        return p + sizeofW(StgHeader) + 1;

    case FUN_1_0:
    case CONSTR_1_0:
        thread(&((StgClosure *)p)->payload[0]);
        return p + sizeofW(StgHeader) + 1;

    case THUNK_1_0:
        thread(&((StgThunk *)p)->payload[0]);
        return p + sizeofW(StgThunk) + 1;

    case THUNK_0_2:
        return p + sizeofW(StgThunk) + 2;

    case FUN_0_2:
    case CONSTR_0_2:
        return p + sizeofW(StgHeader) + 2;

    case THUNK_1_1:
        thread(&((StgThunk *)p)->payload[0]);
        return p + sizeofW(StgThunk) + 2;

    case FUN_1_1:
    case CONSTR_1_1:
        thread(&((StgClosure *)p)->payload[0]);
        return p + sizeofW(StgHeader) + 2;

    case THUNK_2_0:
        thread(&((StgThunk *)p)->payload[0]);
        thread(&((StgThunk *)p)->payload[1]);
        return p + sizeofW(StgThunk) + 2;

    case FUN_2_0:
    case CONSTR_2_0:
        thread(&((StgClosure *)p)->payload[0]);
        thread(&((StgClosure *)p)->payload[1]);
        return p + sizeofW(StgHeader) + 2;

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        thread_(&bco->instrs);
        thread_(&bco->literals);
        thread_(&bco->ptrs);
        thread_(&bco->itbls);
        return p + bco_sizeW(bco);
    }

    case THUNK:
    {
        StgPtr end;

        end = (P_)((StgThunk *)p)->payload +
            info->layout.payload.ptrs;
        for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
            thread((StgClosure **)p);
        }
        return p + info->layout.payload.nptrs;
    }

    case FUN:
    case CONSTR:
    case STABLE_NAME:
    case IND_PERM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
    {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload +
            info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            thread((StgClosure **)p);
        }
        return p + info->layout.payload.nptrs;
    }

    case WEAK:
    {
        StgWeak *w = (StgWeak *)p;
        thread(&w->key);
        thread(&w->value);
        thread(&w->finalizer);
        if (w->link != NULL) {
            thread_(&w->link);
        }
        return p + sizeofW(StgWeak);
    }

    case MVAR:
    {
        StgMVar *mvar = (StgMVar *)p;
        thread_(&mvar->head);
        thread_(&mvar->tail);
        thread(&mvar->value);
        return p + sizeofW(StgMVar);
    }

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
        thread(&((StgInd *)p)->indirectee);
        return p + sizeofW(StgInd);

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        thread(&s->selectee);
        return p + THUNK_SELECTOR_sizeW();
    }

    case AP_STACK:
        return thread_AP_STACK((StgAP_STACK *)p);

    case PAP:
        return thread_PAP((StgPAP *)p);

    case AP:
        return thread_AP((StgAP *)p);

    case ARR_WORDS:
        return p + arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // follow everything
    {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            thread((StgClosure **)p);
        }
        return p;
    }

    case TSO:
        return thread_TSO((StgTSO *)p);

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
        thread_(&wq->closure);
        thread_(&wq->next_queue_entry);
        thread_(&wq->prev_queue_entry);
        return p + sizeofW(StgTVarWatchQueue);
    }

    case TVAR:
    {
        StgTVar *tvar = (StgTVar *)p;
        thread((void *)&tvar->current_value);
        thread((void *)&tvar->first_watch_queue_entry);
        return p + sizeofW(StgTVar);
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = (StgTRecHeader *)p;
        thread_(&trec->enclosing_trec);
        thread_(&trec->current_chunk);
        thread_(&trec->invariants_to_check);
        return p + sizeofW(StgTRecHeader);
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        TRecEntry *e = &(tc -> entries[0]);
        thread_(&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            thread_(&e->tvar);
            thread(&e->expected_value);
            thread(&e->new_value);
        }
        return p + sizeofW(StgTRecChunk);
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
        thread_(&invariant->code);
        thread_(&invariant->last_execution);
        return p + sizeofW(StgAtomicInvariant);
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = (StgInvariantCheckQueue *)p;
        thread_(&queue->invariant);
        thread_(&queue->my_execution);
        thread_(&queue->next_queue_entry);
        return p + sizeofW(StgInvariantCheckQueue);
    }

    default:
        barf("update_fwd: unknown/strange object %d", (int)(info->type));
        return NULL;
    }
}

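// Thread the fields of every object in a chain of (non-compacted)
// blocks, scanning each block linearly.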
static void
update_fwd( bdescr *blocks )
{
    StgPtr p;
    bdescr *bd;
    StgInfoTable *info;

    bd = blocks;

    // cycle through all the blocks in the step
    for (; bd != NULL; bd = bd->link) {
        p = bd->start;

        // linearly scan the objects in this block
        while (p < bd->free) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
            info = get_itbl((StgClosure *)p);
            p = thread_obj(info, p);
        }
    }
}

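// Forward pass over the compacted area: scan the live (marked) objects
// in each block, thread their fields, compute each object's final
// destination, and unthread the chain rooted at it so that every field
// already pointing to the object now points to its new location.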
static void
update_fwd_compact( bdescr *blocks )
{
    StgPtr p, q, free;
#if 0
    StgWord m;
#endif
    bdescr *bd, *free_bd;
    StgInfoTable *info;
    nat size;

    bd = blocks;
    free_bd = blocks;
    free = free_bd->start;

    // cycle through all the blocks in the step
    for (; bd != NULL; bd = bd->link) {
        p = bd->start;

        while (p < bd->free ) {

            while ( p < bd->free && !is_marked(p,bd) ) {
                p++;
            }
            if (p >= bd->free) {
                break;
            }

#if 0
    next:
            m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord))));
            m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1));

            while ( p < bd->free ) {

                if ((m & 1) == 0) {
                    m >>= 1;
                    p++;
                    if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) {
                        goto next;
                    } else {
                        continue;
                    }
                }
#endif

            // Problem: we need to know the destination for this cell
            // in order to unthread its info pointer.  But we can't
            // know the destination without the size, because we may
            // spill into the next block.  So we have to run down the
            // threaded list and get the info ptr first.
            info = get_threaded_info(p);

            q = p;

            p = thread_obj(info, p);

            size = p - q;
            if (free + size > free_bd->start + BLOCK_SIZE_W) {
                // unset the next bit in the bitmap to indicate that
                // this object needs to be pushed into the next
                // block.  This saves us having to run down the
                // threaded info pointer list twice during the next pass.
                unmark(q+1,bd);
                free_bd = free_bd->link;
                free = free_bd->start;
            } else {
                ASSERT(is_marked(q+1,bd));
            }

            unthread(q,free);
            free += size;
#if 0
            goto next;
#endif
        }
    }
}

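// Backward (sliding) pass over the compacted area: unthread each live
// object's remaining chain, then slide the object down to the final
// destination computed in the forward pass.  Returns the number of
// blocks now occupied by the step.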
static nat
update_bkwd_compact( step *stp )
{
    StgPtr p, free;
#if 0
    StgWord m;
#endif
    bdescr *bd, *free_bd;
    StgInfoTable *info;
    nat size, free_blocks;

    bd = free_bd = stp->old_blocks;
    free = free_bd->start;
    free_blocks = 1;

    // cycle through all the blocks in the step
    for (; bd != NULL; bd = bd->link) {
        p = bd->start;

        while (p < bd->free ) {

            while ( p < bd->free && !is_marked(p,bd) ) {
                p++;
            }
            if (p >= bd->free) {
                break;
            }

#if 0
    next:
            m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord))));
            m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1));

            while ( p < bd->free ) {

                if ((m & 1) == 0) {
                    m >>= 1;
                    p++;
                    if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) {
                        goto next;
                    } else {
                        continue;
                    }
                }
#endif

            if (!is_marked(p+1,bd)) {
                // don't forget to update the free ptr in the block desc.
                free_bd->free = free;
                free_bd = free_bd->link;
                free = free_bd->start;
                free_blocks++;
            }

            unthread(p,free);
            ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)p)->header.info));
            info = get_itbl((StgClosure *)p);
            size = closure_sizeW_((StgClosure *)p,info);

            if (free != p) {
                move(free,p,size);
            }

            // relocate TSOs
            if (info->type == TSO) {
                move_TSO((StgTSO *)p, (StgTSO *)free);
            }

            free += size;
            p += size;
#if 0
            goto next;
#endif
        }
    }

    // free the remaining blocks and count what's left.
    free_bd->free = free;
    if (free_bd->link != NULL) {
        freeChain(free_bd->link);
        free_bd->link = NULL;
    }

    return free_blocks;
}

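// Entry point for the compacting collector, called after the mark
// phase.  Compaction proceeds in three steps: thread all the roots,
// update forward pointers (computing destinations), then update
// backward pointers and slide the objects into place.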
void
compact(void)
{
    nat g, s, blocks;
    step *stp;

    // 1. thread the roots
    GetRoots((evac_fn)thread);

    // the weak pointer lists...
    if (weak_ptr_list != NULL) {
        thread((void *)&weak_ptr_list);
    }
    if (old_weak_ptr_list != NULL) {
        thread((void *)&old_weak_ptr_list); // tmp
    }

    // mutable lists
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        bdescr *bd;
        StgPtr p;
        for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
            for (p = bd->start; p < bd->free; p++) {
                thread((StgClosure **)p);
            }
        }
    }

    // the global thread list
    thread((void *)&all_threads);

    // any threads resurrected during this GC
    thread((void *)&resurrected_threads);

    // the task list
    {
        Task *task;
        for (task = all_tasks; task != NULL; task = task->all_link) {
            if (task->tso) {
                thread_(&task->tso);
            }
        }
    }

    // the static objects
    thread_static(scavenged_static_objects);

    // the stable pointer table
    threadStablePtrTable((evac_fn)thread);

    // the CAF list (used by GHCi)
    markCAFs((evac_fn)thread);

    // 2. update forward ptrs
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            if (g == 0 && s == 0) continue;
            stp = &generations[g].steps[s];
            debugTrace(DEBUG_gc, "update_fwd: %d.%d",
                       stp->gen->no, stp->no);

            update_fwd(stp->blocks);
            update_fwd_large(stp->scavenged_large_objects);
            if (g == RtsFlags.GcFlags.generations-1 && stp->old_blocks != NULL) {
                debugTrace(DEBUG_gc, "update_fwd: %d.%d (compact)",
                           stp->gen->no, stp->no);
                update_fwd_compact(stp->old_blocks);
            }
        }
    }

    // 3. update backward ptrs
    stp = &oldest_gen->steps[0];
    if (stp->old_blocks != NULL) {
        blocks = update_bkwd_compact(stp);
        debugTrace(DEBUG_gc,
                   "update_bkwd: %d.%d (compact, old: %d blocks, now %d blocks)",
                   stp->gen->no, stp->no,
                   stp->n_old_blocks, blocks);
        stp->n_old_blocks = blocks;
    }
}
964 }