/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 2001-2008
 *
 * Compacting garbage collector
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "GCThread.h"
#include "Storage.h"
#include "RtsUtils.h"
#include "BlockAlloc.h"
#include "GC.h"
#include "Compact.h"
#include "Schedule.h"
#include "Apply.h"
#include "Trace.h"
#include "Weak.h"
#include "MarkWeak.h"
#include "Stable.h"

// Turn off inlining when debugging - it obfuscates things
#ifdef DEBUG
# undef  STATIC_INLINE
# define STATIC_INLINE static
#endif

/* ----------------------------------------------------------------------------
   Threading / unthreading pointers.

   The basic idea here is to chain together all the fields pointing at
   a particular object, with the root of the chain in the object's
   info table field.  The original contents of the info pointer go
   at the end of the chain.

   Adding a new field to the chain is a matter of swapping the
   contents of the field with the contents of the object's info table
   field.

   To unthread the chain, we walk down it updating all the fields on
   the chain with the new location of the object.  We stop when we
   reach the info pointer at the end.

   The main difficulty here is that we need to be able to identify the
   info pointer at the end of the chain.  We can't use the low bits of
   the pointer for this; they are already being used for
   pointer-tagging.  What's more, we need to retain the
   pointer-tagging tag bits on each pointer during the
   threading/unthreading process.

   Our solution is as follows:
     - an info pointer (chain length zero) is identified by having tag 0
     - in a threaded chain of length > 0:
         - the pointer-tagging tag bits are attached to the info pointer
         - the first entry in the chain has tag 1
         - second and subsequent entries in the chain have tag 2

   This exploits the fact that the tag on each pointer to a given
   closure is normally the same (if they are not the same, then
   presumably the tag is not essential and it therefore doesn't matter
   if we throw away some of the tags).
   ------------------------------------------------------------------------- */
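
/* The sketch below is a standalone model of the scheme just described,
   for illustration only (hence the #if 0): fields and the info word are
   modelled as plain uintptr_t values, with the same tag encoding
   (0 = bare info pointer, 1 = first chain entry, 2 = later entries).
   The demo_* names are hypothetical and not part of the RTS. */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_TAG(w)    ((w) & 3)
#define DEMO_UNTAG(w)  ((w) & ~(uintptr_t)3)

/* Thread one field into the chain rooted in the object's info word. */
static void demo_thread (uintptr_t *field, uintptr_t *obj)
{
    uintptr_t iptr = obj[0];               /* current chain root */
    uintptr_t ftag = DEMO_TAG(*field);     /* the field's pointer tag */
    if (DEMO_TAG(iptr) == 0) {
        /* empty chain: stash the tagged info ptr in the field */
        *field = iptr + ftag;
        obj[0] = (uintptr_t)field + 1;     /* first entry: tag 1 */
    } else {
        /* non-empty: old head moves into the field */
        *field = iptr;
        obj[0] = (uintptr_t)field + 2;     /* later entry: tag 2 */
    }
}

/* Point every chained field at new_addr (already carrying the pointer
   tag, as in unthread() below) and restore the untagged info pointer. */
static void demo_unthread (uintptr_t *obj, uintptr_t new_addr)
{
    uintptr_t q = obj[0];
    for (;;) {
        switch (DEMO_TAG(q)) {
        case 0:                            /* bare info ptr: done */
            return;
        case 1: {                          /* last field in the chain */
            uintptr_t *field = (uintptr_t *)(q - 1);
            uintptr_t r = *field;          /* tagged info pointer */
            *field = new_addr;
            obj[0] = DEMO_UNTAG(r);        /* restore the info ptr */
            return;
        }
        case 2: {                          /* interior field */
            uintptr_t *field = (uintptr_t *)(q - 2);
            q = *field;
            *field = new_addr;
            break;
        }
        }
    }
}

int main (void)
{
    static uintptr_t fake_info[1];          /* word-aligned, so tag 0 */
    uintptr_t obj[2] = { (uintptr_t)fake_info, 0 };
    uintptr_t f1 = (uintptr_t)obj + 1;      /* two tagged fields       */
    uintptr_t f2 = (uintptr_t)obj + 1;      /* pointing at obj         */

    demo_thread(&f1, obj);                  /* chain: obj -> f1        */
    demo_thread(&f2, obj);                  /* chain: obj -> f2 -> f1  */

    /* pretend the object moves to 0x1000; the tag (1) rides along */
    demo_unthread(obj, 0x1000 + 1);

    assert(f1 == 0x1000 + 1);               /* both fields updated     */
    assert(f2 == 0x1000 + 1);
    assert(obj[0] == (uintptr_t)fake_info); /* info pointer restored   */
    return 0;
}
#endif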

STATIC_INLINE void
thread (StgClosure **p)
{
    StgClosure *q0;
    StgPtr q;
    StgWord iptr;
    bdescr *bd;

    q0 = *p;
    q  = (StgPtr)UNTAG_CLOSURE(q0);

    // It doesn't look like a closure at the moment, because the info
    // ptr is possibly threaded:
    // ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    if (HEAP_ALLOCED(q)) {
        bd = Bdescr(q);

        if (bd->flags & BF_MARKED)
        {
            iptr = *q;
            switch (GET_CLOSURE_TAG((StgClosure *)iptr))
            {
            case 0:
                // this is the info pointer; we are creating a new chain.
                // save the original tag at the end of the chain.
                *p = (StgClosure *)((StgWord)iptr + GET_CLOSURE_TAG(q0));
                *q = (StgWord)p + 1;
                break;
            case 1:
            case 2:
                // this is a chain of length 1 or more
                *p = (StgClosure *)iptr;
                *q = (StgWord)p + 2;
                break;
            }
        }
    }
}

static void
thread_root (void *user STG_UNUSED, StgClosure **p)
{
    thread(p);
}

// This version of thread() takes a (void *), used to circumvent
// warnings from gcc about type punning and strict aliasing.
STATIC_INLINE void thread_ (void *p) { thread((StgClosure **)p); }

STATIC_INLINE void
unthread( StgPtr p, StgWord free )
{
    StgWord q, r;
    StgPtr q0;

    q = *p;
loop:
    switch (GET_CLOSURE_TAG((StgClosure *)q))
    {
    case 0:
        // nothing to do; the chain is length zero
        return;
    case 1:
        q0 = (StgPtr)(q-1);
        r = *q0;  // r is the info ptr, tagged with the pointer-tag
        *q0 = free;
        *p = (StgWord)UNTAG_CLOSURE((StgClosure *)r);
        return;
    case 2:
        q0 = (StgPtr)(q-2);
        r = *q0;
        *q0 = free;
        q = r;
        goto loop;
    default:
        barf("unthread");
    }
}

// Traverse a threaded chain and pull out the info pointer at the end.
// The info pointer is also tagged with the appropriate pointer tag
// for this closure, which should be attached to the pointer
// subsequently passed to unthread().
STATIC_INLINE StgWord
get_threaded_info( StgPtr p )
{
    StgWord q;

    q = (W_)GET_INFO(UNTAG_CLOSURE((StgClosure *)p));

loop:
    switch (GET_CLOSURE_TAG((StgClosure *)q))
    {
    case 0:
        ASSERT(LOOKS_LIKE_INFO_PTR(q));
        return q;
    case 1:
    {
        StgWord r = *(StgPtr)(q-1);
        ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)UNTAG_CLOSURE((StgClosure *)r)));
        return r;
    }
    case 2:
        q = *(StgPtr)(q-2);
        goto loop;
    default:
        barf("get_threaded_info");
    }
}

// A word-aligned copy loop is faster for small objects than calling
// libc's or gcc's memmove.
// Remember, the two regions *might* overlap, but: to <= from, so a
// forwards word-by-word copy is safe.
STATIC_INLINE void
move(StgPtr to, StgPtr from, StgWord size)
{
    for(; size > 0; --size) {
        *to++ = *from++;
    }
}
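
/* Illustration (hypothetical, standalone): with to <= from, a forwards
   word-by-word copy is safe even when the regions overlap, because each
   source word is read before the copy can overwrite it. */
#if 0
#include <assert.h>

int main (void)
{
    unsigned long buf[6] = { 0, 0, 7, 8, 9, 0 };
    /* slide words 2..4 down to position 0 (to < from, overlap at 2) */
    unsigned long *to = &buf[0], *from = &buf[2];
    for (unsigned long n = 3; n > 0; --n) { *to++ = *from++; }
    assert(buf[0] == 7 && buf[1] == 8 && buf[2] == 9);
    return 0;
}
#endif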

static void
thread_static( StgClosure* p )
{
    const StgInfoTable *info;

    // keep going until we've threaded all the objects on the linked
    // list...
    while (p != END_OF_STATIC_OBJECT_LIST) {
        p = UNTAG_STATIC_LIST_PTR(p);
        info = get_itbl(p);
        switch (info->type) {

        case IND_STATIC:
            thread(&((StgInd *)p)->indirectee);
            p = *IND_STATIC_LINK(p);
            continue;

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK(p);
            continue;
        case FUN_STATIC:
            p = *FUN_STATIC_LINK(p);
            continue;
        case CONSTR_STATIC:
            p = *STATIC_LINK(info,p);
            continue;

        default:
            barf("thread_static: strange closure %d", (int)(info->type));
        }

    }
}

STATIC_INLINE void
thread_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
{
    W_ i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            thread((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}

STATIC_INLINE StgPtr
thread_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            thread((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
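
/* Worked example (illustration only): in these layout bitmaps a 0 bit
   means "this payload word is a pointer, thread it" and a 1 bit means
   "non-pointer, skip".  For a 3-word payload laid out as
   [pointer, raw word, pointer] the bitmap is 0b010.  The hypothetical
   demo below just prints which indices the loop above would thread. */
#if 0
#include <stdio.h>

static void demo_small_bitmap_walk (unsigned long bitmap, unsigned long size)
{
    for (unsigned long i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0)
            printf("thread payload[%lu]\n", i);   /* pointer word */
        else
            printf("skip payload[%lu]\n", i);     /* non-pointer word */
    }
}

int main (void)
{
    demo_small_bitmap_walk(0x2 /* 0b010 */, 3);   /* threads 0 and 2 */
    return 0;
}
#endif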

STATIC_INLINE StgPtr
thread_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    StgWord size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        thread_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        p = thread_small_bitmap(p, size, bitmap);
        break;
    }
    return p;
}

static void
thread_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    StgWord size;

    // highly similar to scavenge_stack, but we do pointer threading here.

    while (p < stack_end) {

        // *p must be the info pointer of an activation
        // record.  All activation records have 'bitmap' style layout
        // info.
        //
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

            // small bitmap (<= 32 entries, or 64 on a 64-bit machine)
        case CATCH_RETRY_FRAME:
        case CATCH_STM_FRAME:
        case ATOMICALLY_FRAME:
        case UPDATE_FRAME:
        case UNDERFLOW_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            p++;
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p = thread_small_bitmap(p, size, bitmap);
            continue;

        case RET_BCO: {
            StgBCO *bco;

            p++;
            bco = (StgBCO *)*p;
            thread((StgClosure **)p);
            p++;
            size = BCO_BITMAP_SIZE(bco);
            thread_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }

            // large bitmap (> 32 entries, or 64 on a 64-bit machine)
        case RET_BIG:
            p++;
            size = GET_LARGE_BITMAP(&info->i)->size;
            thread_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            continue;

        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            fun_info = FUN_INFO_PTR_TO_STRUCT((StgInfoTable *)UNTAG_CLOSURE((StgClosure *)
                           get_threaded_info((StgPtr)ret_fun->fun)));
                 // N.B. grab the info table *before* threading ret_fun->fun below
            thread(&ret_fun->fun);
            p = thread_arg_block(fun_info, ret_fun->payload);
            continue;
        }

        default:
            barf("thread_stack: weird activation record found on stack: %d",
                 (int)(info->i.type));
        }
    }
}

STATIC_INLINE StgPtr
thread_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = FUN_INFO_PTR_TO_STRUCT((StgInfoTable *)UNTAG_CLOSURE((StgClosure *)
                   get_threaded_info((StgPtr)fun)));
    ASSERT(fun_info->i.type != PAP);

    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        thread_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        thread_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        p = thread_small_bitmap(p, size, bitmap);
        break;
    }

    return p;
}

STATIC_INLINE StgPtr
thread_PAP (StgPAP *pap)
{
    StgPtr p;
    p = thread_PAP_payload(pap->fun, pap->payload, pap->n_args);
    thread(&pap->fun);
    return p;
}

STATIC_INLINE StgPtr
thread_AP (StgAP *ap)
{
    StgPtr p;
    p = thread_PAP_payload(ap->fun, ap->payload, ap->n_args);
    thread(&ap->fun);
    return p;
}

STATIC_INLINE StgPtr
thread_AP_STACK (StgAP_STACK *ap)
{
    thread(&ap->fun);
    thread_stack((P_)ap->payload, (P_)ap->payload + ap->size);
    return (P_)ap + sizeofW(StgAP_STACK) + ap->size;
}

static StgPtr
thread_TSO (StgTSO *tso)
{
    thread_(&tso->_link);
    thread_(&tso->global_link);

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnMVarRead
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        thread_(&tso->block_info.closure);
    }
    thread_(&tso->blocked_exceptions);
    thread_(&tso->bq);

    thread_(&tso->trec);

    thread_(&tso->stackobj);
    return (StgPtr)tso + sizeofW(StgTSO);
}


static void
update_fwd_large( bdescr *bd )
{
    StgPtr p;
    const StgInfoTable* info;

    for (; bd != NULL; bd = bd->link) {

        // nothing to do in a pinned block; it might not even have an object
        // at the beginning.
        if (bd->flags & BF_PINNED) continue;

        p = bd->start;
        info = get_itbl((StgClosure *)p);

        switch (info->type) {

        case ARR_WORDS:
            // nothing to follow
            continue;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgMutArrPtrs *a;

            a = (StgMutArrPtrs*)p;
            for (p = (P_)a->payload; p < (P_)&a->payload[a->ptrs]; p++) {
                thread((StgClosure **)p);
            }
            continue;
        }

        case SMALL_MUT_ARR_PTRS_CLEAN:
        case SMALL_MUT_ARR_PTRS_DIRTY:
        case SMALL_MUT_ARR_PTRS_FROZEN:
        case SMALL_MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgSmallMutArrPtrs *a;

            a = (StgSmallMutArrPtrs*)p;
            for (p = (P_)a->payload; p < (P_)&a->payload[a->ptrs]; p++) {
                thread((StgClosure **)p);
            }
            continue;
        }

        case STACK:
        {
            StgStack *stack = (StgStack*)p;
            thread_stack(stack->sp, stack->stack + stack->stack_size);
            continue;
        }

        case AP_STACK:
            thread_AP_STACK((StgAP_STACK *)p);
            continue;

        case PAP:
            thread_PAP((StgPAP *)p);
            continue;

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = (StgTRecChunk *)p;
            TRecEntry *e = &(tc->entries[0]);
            thread_(&tc->prev_chunk);
            for (i = 0; i < tc->next_entry_idx; i++, e++) {
                thread_(&e->tvar);
                thread(&e->expected_value);
                thread(&e->new_value);
            }
            continue;
        }

        default:
            barf("update_fwd_large: unknown/strange object %d", (int)(info->type));
        }
    }
}

// ToDo: too big to inline
static /* STATIC_INLINE */ StgPtr
thread_obj (StgInfoTable *info, StgPtr p)
{
    switch (info->type) {
    case THUNK_0_1:
        return p + sizeofW(StgThunk) + 1;

    case FUN_0_1:
    case CONSTR_0_1:
        return p + sizeofW(StgHeader) + 1;

    case FUN_1_0:
    case CONSTR_1_0:
        thread(&((StgClosure *)p)->payload[0]);
        return p + sizeofW(StgHeader) + 1;

    case THUNK_1_0:
        thread(&((StgThunk *)p)->payload[0]);
        return p + sizeofW(StgThunk) + 1;

    case THUNK_0_2:
        return p + sizeofW(StgThunk) + 2;

    case FUN_0_2:
    case CONSTR_0_2:
        return p + sizeofW(StgHeader) + 2;

    case THUNK_1_1:
        thread(&((StgThunk *)p)->payload[0]);
        return p + sizeofW(StgThunk) + 2;

    case FUN_1_1:
    case CONSTR_1_1:
        thread(&((StgClosure *)p)->payload[0]);
        return p + sizeofW(StgHeader) + 2;

    case THUNK_2_0:
        thread(&((StgThunk *)p)->payload[0]);
        thread(&((StgThunk *)p)->payload[1]);
        return p + sizeofW(StgThunk) + 2;

    case FUN_2_0:
    case CONSTR_2_0:
        thread(&((StgClosure *)p)->payload[0]);
        thread(&((StgClosure *)p)->payload[1]);
        return p + sizeofW(StgHeader) + 2;

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        thread_(&bco->instrs);
        thread_(&bco->literals);
        thread_(&bco->ptrs);
        return p + bco_sizeW(bco);
    }

    case THUNK:
    {
        StgPtr end;

        end = (P_)((StgThunk *)p)->payload +
            info->layout.payload.ptrs;
        for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
            thread((StgClosure **)p);
        }
        return p + info->layout.payload.nptrs;
    }

    case FUN:
    case CONSTR:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case TVAR:
    case BLACKHOLE:
    case BLOCKING_QUEUE:
    {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload +
            info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            thread((StgClosure **)p);
        }
        return p + info->layout.payload.nptrs;
    }

    case WEAK:
    {
        StgWeak *w = (StgWeak *)p;
        thread(&w->cfinalizers);
        thread(&w->key);
        thread(&w->value);
        thread(&w->finalizer);
        if (w->link != NULL) {
            thread_(&w->link);
        }
        return p + sizeofW(StgWeak);
    }

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        thread_(&mvar->head);
        thread_(&mvar->tail);
        thread(&mvar->value);
        return p + sizeofW(StgMVar);
    }

    case IND:
    case IND_PERM:
        thread(&((StgInd *)p)->indirectee);
        return p + sizeofW(StgInd);

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        thread(&s->selectee);
        return p + THUNK_SELECTOR_sizeW();
    }

    case AP_STACK:
        return thread_AP_STACK((StgAP_STACK *)p);

    case PAP:
        return thread_PAP((StgPAP *)p);

    case AP:
        return thread_AP((StgAP *)p);

    case ARR_WORDS:
        return p + arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // follow everything
    {
        StgMutArrPtrs *a;

        a = (StgMutArrPtrs *)p;
        for (p = (P_)a->payload; p < (P_)&a->payload[a->ptrs]; p++) {
            thread((StgClosure **)p);
        }

        return (StgPtr)a + mut_arr_ptrs_sizeW(a);
    }

    case SMALL_MUT_ARR_PTRS_CLEAN:
    case SMALL_MUT_ARR_PTRS_DIRTY:
    case SMALL_MUT_ARR_PTRS_FROZEN:
    case SMALL_MUT_ARR_PTRS_FROZEN0:
        // follow everything
    {
        StgSmallMutArrPtrs *a;

        a = (StgSmallMutArrPtrs *)p;
        for (p = (P_)a->payload; p < (P_)&a->payload[a->ptrs]; p++) {
            thread((StgClosure **)p);
        }

        return (StgPtr)a + small_mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        return thread_TSO((StgTSO *)p);

    case STACK:
    {
        StgStack *stack = (StgStack*)p;
        thread_stack(stack->sp, stack->stack + stack->stack_size);
        return p + stack_sizeW(stack);
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        TRecEntry *e = &(tc->entries[0]);
        thread_(&tc->prev_chunk);
        for (i = 0; i < tc->next_entry_idx; i++, e++) {
            thread_(&e->tvar);
            thread(&e->expected_value);
            thread(&e->new_value);
        }
        return p + sizeofW(StgTRecChunk);
    }

    default:
        barf("update_fwd: unknown/strange object %d", (int)(info->type));
        return NULL;
    }
}

static void
update_fwd( bdescr *blocks )
{
    StgPtr p;
    bdescr *bd;
    StgInfoTable *info;

    bd = blocks;

    // cycle through all the blocks in the step
    for (; bd != NULL; bd = bd->link) {
        p = bd->start;

        // linearly scan the objects in this block
        while (p < bd->free) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
            info = get_itbl((StgClosure *)p);
            p = thread_obj(info, p);
        }
    }
}

static void
update_fwd_compact( bdescr *blocks )
{
    StgPtr p, q, free;
#if 0
    StgWord m;
#endif
    bdescr *bd, *free_bd;
    StgInfoTable *info;
    StgWord size;
    StgWord iptr;

    bd = blocks;
    free_bd = blocks;
    free = free_bd->start;

    // cycle through all the blocks in the step
    for (; bd != NULL; bd = bd->link) {
        p = bd->start;

        while (p < bd->free ) {

            while ( p < bd->free && !is_marked(p,bd) ) {
                p++;
            }
            if (p >= bd->free) {
                break;
            }

#if 0
    next:
        m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord))));
        m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1));

        while ( p < bd->free ) {

            if ((m & 1) == 0) {
                m >>= 1;
                p++;
                if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) {
                    goto next;
                } else {
                    continue;
                }
            }
#endif

            // Problem: we need to know the destination for this cell
            // in order to unthread its info pointer.  But we can't
            // know the destination without the size, because we may
            // spill into the next block.  So we have to run down the
            // threaded list and get the info ptr first.
            //
            // ToDo: one possible avenue of attack is to use the fact
            // that if (p&BLOCK_MASK) >= (free&BLOCK_MASK), then we
            // definitely have enough room.  Also see bug #1147.
            iptr = get_threaded_info(p);
            info = INFO_PTR_TO_STRUCT((StgInfoTable *)UNTAG_CLOSURE((StgClosure *)iptr));

            q = p;

            p = thread_obj(info, p);

            size = p - q;
            if (free + size > free_bd->start + BLOCK_SIZE_W) {
                // set the next bit in the bitmap to indicate that
                // this object needs to be pushed into the next
                // block.  This saves us having to run down the
                // threaded info pointer list twice during the next pass.
                mark(q+1,bd);
                free_bd = free_bd->link;
                free = free_bd->start;
            } else {
                ASSERT(!is_marked(q+1,bd));
            }

            unthread(q,(StgWord)free + GET_CLOSURE_TAG((StgClosure *)iptr));
            free += size;
#if 0
            goto next;
#endif
        }
    }
}
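
/* Illustration (hypothetical, standalone): the forward pass above
   computes destinations by advancing a "free" pointer through the same
   block chain, spilling to the next block whenever the next object
   would not fit; mark(q+1,bd) records such spills so that the backward
   pass (update_bkwd_compact, below) can detect them via is_marked(p+1,bd)
   without re-walking the threaded chains.  The toy below just replays
   that destination arithmetic on a list of object sizes. */
#if 0
#include <stdio.h>

#define DEMO_BLOCK_W 8   /* toy block capacity, in words */

int main (void)
{
    unsigned sizes[] = { 3, 4, 2, 5, 1 };   /* live objects, in order */
    unsigned block = 0, off = 0;

    for (unsigned i = 0; i < sizeof(sizes)/sizeof(sizes[0]); i++) {
        if (off + sizes[i] > DEMO_BLOCK_W) { /* doesn't fit: spill,    */
            block++;                         /* like mark(q+1,bd)      */
            off = 0;
        }
        printf("object %u (size %u) -> block %u, offset %u\n",
               i, sizes[i], block, off);
        off += sizes[i];
    }
    return 0;
}
#endif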

static W_
update_bkwd_compact( generation *gen )
{
    StgPtr p, free;
#if 0
    StgWord m;
#endif
    bdescr *bd, *free_bd;
    StgInfoTable *info;
    StgWord size;
    W_ free_blocks;
    StgWord iptr;

    bd = free_bd = gen->old_blocks;
    free = free_bd->start;
    free_blocks = 1;

    // cycle through all the blocks in the step
    for (; bd != NULL; bd = bd->link) {
        p = bd->start;

        while (p < bd->free ) {

            while ( p < bd->free && !is_marked(p,bd) ) {
                p++;
            }
            if (p >= bd->free) {
                break;
            }

#if 0
    next:
        m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord))));
        m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1));

        while ( p < bd->free ) {

            if ((m & 1) == 0) {
                m >>= 1;
                p++;
                if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) {
                    goto next;
                } else {
                    continue;
                }
            }
#endif

            if (is_marked(p+1,bd)) {
                // don't forget to update the free ptr in the block desc.
                free_bd->free = free;
                free_bd = free_bd->link;
                free = free_bd->start;
                free_blocks++;
            }

            iptr = get_threaded_info(p);
            unthread(p, (StgWord)free + GET_CLOSURE_TAG((StgClosure *)iptr));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)((StgClosure *)p)->header.info));
            info = get_itbl((StgClosure *)p);
            size = closure_sizeW_((StgClosure *)p,info);

            if (free != p) {
                move(free,p,size);
            }

            // relocate stack objects: move_STACK() adjusts the stack's
            // sp pointer by the distance moved
            if (info->type == STACK) {
                move_STACK((StgStack *)p, (StgStack *)free);
            }

            free += size;
            p += size;
#if 0
            goto next;
#endif
        }
    }

    // free the remaining blocks and count what's left.
    free_bd->free = free;
    if (free_bd->link != NULL) {
        freeChain(free_bd->link);
        free_bd->link = NULL;
    }

    return free_blocks;
}

void
compact(StgClosure *static_objects)
{
    W_ n, g, blocks;
    generation *gen;

    // 1. thread the roots
    markCapabilities((evac_fn)thread_root, NULL);

    markScheduler((evac_fn)thread_root, NULL);

    // the weak pointer lists...
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        if (generations[g].weak_ptr_list != NULL) {
            thread((void *)&generations[g].weak_ptr_list);
        }
    }

    if (dead_weak_ptr_list != NULL) {
        thread((void *)&dead_weak_ptr_list); // tmp
    }

    // mutable lists
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        bdescr *bd;
        StgPtr p;
        for (n = 0; n < n_capabilities; n++) {
            for (bd = capabilities[n]->mut_lists[g];
                 bd != NULL; bd = bd->link) {
                for (p = bd->start; p < bd->free; p++) {
                    thread((StgClosure **)p);
                }
            }
        }
    }

    // the global thread list
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        thread((void *)&generations[g].threads);
    }

    // any threads resurrected during this GC
    thread((void *)&resurrected_threads);

    // the task list
    {
        Task *task;
        InCall *incall;
        for (task = all_tasks; task != NULL; task = task->all_next) {
            for (incall = task->incall; incall != NULL;
                 incall = incall->prev_stack) {
                if (incall->tso) {
                    thread_(&incall->tso);
                }
            }
        }
    }

    // the static objects
    thread_static(static_objects /* ToDo: ok? */);

    // the stable pointer table
    threadStableTables((evac_fn)thread_root, NULL);

    // the CAF list (used by GHCi)
    markCAFs((evac_fn)thread_root, NULL);

    // 2. update forward ptrs
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen = &generations[g];
        debugTrace(DEBUG_gc, "update_fwd: %d", g);

        update_fwd(gen->blocks);
        for (n = 0; n < n_capabilities; n++) {
            update_fwd(gc_threads[n]->gens[g].todo_bd);
            update_fwd(gc_threads[n]->gens[g].part_list);
        }
        update_fwd_large(gen->scavenged_large_objects);
        if (g == RtsFlags.GcFlags.generations-1 && gen->old_blocks != NULL) {
            debugTrace(DEBUG_gc, "update_fwd: %d (compact)", g);
            update_fwd_compact(gen->old_blocks);
        }
    }

    // 3. update backward ptrs
    gen = oldest_gen;
    if (gen->old_blocks != NULL) {
        blocks = update_bkwd_compact(gen);
        debugTrace(DEBUG_gc,
                   "update_bkwd: %d (compact, old: %d blocks, now %d blocks)",
                   gen->no, gen->n_old_blocks, blocks);
        gen->n_old_blocks = blocks;
    }
}
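
/* For context: a sketch of how this entry point is reached during a
   collection.  This is an assumption about the call site (consult GC.c
   for the real conditions and the static-object list passed in), not
   verbatim RTS code. */
#if 0
    // After the mark phase of a major GC, when the oldest generation is
    // collected by mark/compact rather than mark/sweep:
    if (major_gc && oldest_gen->mark) {
        if (oldest_gen->compact)
            compact(gct->scavenged_static_objects);
        else
            sweep(oldest_gen);
    }
#endif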