/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   https://gitlab.haskell.org/ghc/ghc/wikis/commentary/rts/storage/gc
 *
 * ---------------------------------------------------------------------------*/
/* ----------------------------------------------------------------------------
   We have two main scavenge functions:

   - scavenge_block(bdescr *bd)
   - scavenge_one(StgPtr p)

   As the names and parameters suggest, the first one scavenges a whole
   block while the second one scavenges only a single object.  That is not
   the only difference, however: scavenge_block scavenges all SRTs, while
   scavenge_one only scavenges the SRTs of stack frames.  The reason is
   that scavenge_one is called in two cases:

   - When scavenging a mut_list
   - When scavenging a large object

   We don't have to scavenge SRTs when scavenging a mut_list, because we
   only scavenge mut_lists in minor GCs, and static objects are only
   collected in major GCs.

   However, because scavenge_one is also used to scavenge large objects
   (which are scavenged even in major GCs), we need to deal with the SRTs
   of large objects.  We never allocate large FUNs and THUNKs, but we do
   allocate large STACKs (e.g. in threadStackOverflow), and stack frames
   can have SRTs.  So scavenge_one skips FUN and THUNK SRTs but scavenges
   stack frame SRTs.

   In summary, in a major GC:

   - scavenge_block() scavenges all SRTs
   - scavenge_one() scavenges only stack frame SRTs
   ------------------------------------------------------------------------- */
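/* The following is a minimal illustrative sketch (kept under "#if 0", not part
 * of the build) of how the two entry points divide the work described above.
 * The workspace and mut_list block shapes are assumptions borrowed from the
 * code later in this file; the helper name example_scavenge_roundup is
 * hypothetical.
 */
#if 0
static void example_scavenge_roundup(gen_workspace *ws, bdescr *mut_bd)
{
    // Whole pending to-do blocks go through scavenge_block(), which walks
    // every closure between bd->u.scan and bd->free and scavenges all SRTs.
    if (ws->todo_bd->u.scan < ws->todo_free) {
        scavenge_block(ws->todo_bd);
    }

    // Individual mut_list entries (and, elsewhere, large objects) go through
    // scavenge_one(), which follows only stack-frame SRTs.
    for (StgPtr q = mut_bd->start; q < mut_bd->free; q++) {
        scavenge_one((StgPtr)*q);
    }
}
#endif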
#include "PosixSource.h"

#include "MarkStack.h"
#include "Capability.h"
#include "LdvProfile.h"
#include "HeapUtils.h"

#include "sm/MarkWeak.h"
#include "sm/NonMoving.h" // for nonmoving_set_closure_mark_bit
#include "sm/NonMovingScav.h"
static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   StgWord size);
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define evacuate_BLACKHOLE(a) evacuate_BLACKHOLE1(a)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_block(a) scavenge_block1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
# define scavengeTSO(tso) scavengeTSO1(tso)
# define scavenge_stack(p, stack_end) scavenge_stack1(p, stack_end)
# define scavenge_fun_srt(info) scavenge_fun_srt1(info)
# define scavenge_thunk_srt(info) scavenge_thunk_srt1(info)
# define scavenge_mut_arr_ptrs(info) scavenge_mut_arr_ptrs1(info)
# define scavenge_PAP(pap) scavenge_PAP1(pap)
# define scavenge_AP(ap) scavenge_AP1(ap)
# define scavenge_compact(str) scavenge_compact1(str)
#endif
static void
do_evacuate(StgClosure **p, void *user STG_UNUSED)
{
    evacuate(p);
}
/* -----------------------------------------------------------------------------
   Scavenging a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
    bool saved_eager;

    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);

    // update the pointer from the InCall.
    if (tso->bound != NULL) {
        // NB. We can't just set tso->bound->tso = tso, because this
        // might be an invalid copy of the TSO resulting from multiple
        // threads evacuating the TSO simultaneously (see
        // Evac.c:copy_tag()). Calling evacuate() on this pointer
        // will ensure that we update it to point to the correct copy.
        evacuate((StgClosure **)&tso->bound->tso);
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = false;

    evacuate((StgClosure **)&tso->blocked_exceptions);
    evacuate((StgClosure **)&tso->bq);

    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);

    evacuate((StgClosure **)&tso->stackobj);

    evacuate((StgClosure **)&tso->_link);
    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnMVarRead
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        evacuate(&tso->block_info.closure);
    }
#if defined(THREADED_RTS)
    // in the THREADED_RTS, block_info.closure must always point to a
    // valid closure, because we assume this in throwTo().  In the
    // non-threaded RTS it might be a FD (for
    // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
    else {
        tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    }
#endif

    tso->dirty = gct->failed_to_evac;

    gct->eager_promotion = saved_eager;
}
/* ----------------------------------------------------------------------------
   Scavenging compact objects
   ------------------------------------------------------------------------- */

typedef struct {
    // We must save gct when calling mapHashTable(), which is compiled
    // without GCThread.h and so uses a different calling convention.
    // See also GC.c:mark_root where we do a similar thing.
    gc_thread *saved_gct;
    HashTable *newHash;
} MapHashData;

static void
evacuate_hash_entry(MapHashData *dat, StgWord key, const void *value)
{
    StgClosure *p = (StgClosure *)key;
#if defined(THREADED_RTS)
    gc_thread *old_gct = gct;
#endif

    SET_GCT(dat->saved_gct);
    evacuate(&p);
    insertHashTable(dat->newHash, (StgWord)p, value);
#if defined(THREADED_RTS)
    SET_GCT(old_gct);
#endif
}

/* Here we scavenge the sharing-preservation hash-table, which may contain keys
 * living in from-space.
 */
static void
scavenge_compact(StgCompactNFData *str)
{
    bool saved_eager;
    saved_eager = gct->eager_promotion;
    gct->eager_promotion = false;

    if (str->hash) {
        MapHashData dat;
        dat.saved_gct = gct;
        HashTable *newHash = allocHashTable();
        dat.newHash = newHash;
        mapHashTable(str->hash, (void*)&dat, (MapHashFn)evacuate_hash_entry);
        freeHashTable(str->hash, NULL);
        str->hash = newHash;
    }

    debugTrace(DEBUG_compact,
               "compact alive @%p, gen %d, %" FMT_Word " bytes",
               str, Bdescr((P_)str)->gen_no, str->totalW * sizeof(W_));

    gct->eager_promotion = saved_eager;
    if (gct->failed_to_evac) {
        ((StgClosure *)str)->header.info = &stg_COMPACT_NFDATA_DIRTY_info;
    } else {
        ((StgClosure *)str)->header.info = &stg_COMPACT_NFDATA_CLEAN_info;
    }
}
/* -----------------------------------------------------------------------------
   Mutable arrays of pointers
   -------------------------------------------------------------------------- */

StgPtr
scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
{
    W_ m;
    bool any_failed;
    StgPtr p, q;

    any_failed = false;
    p = (StgPtr)&a->payload[0];
    for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
    {
        q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = true;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = false;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    // the final card, which may only be partially full
    q = (StgPtr)&a->payload[a->ptrs];
    if (p < q) {
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = true;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = false;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
// scavenge only the marked areas of a MUT_ARR_PTRS
static StgPtr
scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
{
    W_ m;
    StgPtr p, q;
    bool any_failed;

    any_failed = false;
    for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
    {
        if (*mutArrPtrsCard(a,m) != 0) {
            p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
            q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
                        (StgPtr)&a->payload[a->ptrs]);
            for (; p < q; p++) {
                evacuate((StgClosure**)p);
            }
            if (gct->failed_to_evac) {
                any_failed = true;
                gct->failed_to_evac = false;
            } else {
                *mutArrPtrsCard(a,m) = 0;
            }
        }
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
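/* Worked example of the card arithmetic used above (illustration only, kept
 * under "#if 0").  Assuming MUT_ARR_PTRS_CARD_BITS is 7 on this platform --
 * the exact value lives in the RTS headers -- each card covers 2^7 = 128
 * payload words, so payload index i belongs to card i >> MUT_ARR_PTRS_CARD_BITS,
 * and an array of a->ptrs elements needs mutArrPtrsCards(a->ptrs) card bytes.
 * The helper name example_card_for_index is hypothetical.
 */
#if 0
static StgWord example_card_for_index(StgMutArrPtrs *a, StgWord i)
{
    ASSERT(i < a->ptrs);
    // With 128 words per card, indices 0..127 map to card 0,
    // indices 128..255 to card 1, and so on.
    return i >> MUT_ARR_PTRS_CARD_BITS;
}
#endif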
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
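/* A small worked example of the layout-bitmap convention used by
 * scavenge_small_bitmap (illustration only, kept under "#if 0"): a clear bit
 * means "this word is a pointer, evacuate it", a set bit means "non-pointer,
 * skip it".  So for a payload of size 4 with bitmap 0b0110, words 0 and 3 are
 * evacuated and words 1 and 2 are left alone.  The helper name is hypothetical.
 */
#if 0
static void example_small_bitmap(StgPtr frame_payload)
{
    StgWord bitmap = 0x6;   // 0b0110: words 1 and 2 are non-pointers
    StgWord size   = 4;
    scavenge_small_bitmap(frame_payload, size, bitmap);
}
#endif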
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
scavenge_arg_block (const StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    StgWord size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        p = scavenge_small_bitmap(p, size, bitmap);
        break;
    }
    return p;
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    const StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        p = scavenge_small_bitmap(p, size, bitmap);
        break;
    }
    return p;
}

STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
/* -----------------------------------------------------------------------------
   Scavenging SRTs
   -------------------------------------------------------------------------- */

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    if (thunk_info->i.srt) {
        StgClosure *srt = (StgClosure*)GET_SRT(thunk_info);
        evacuate(&srt);
    }
}

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    if (fun_info->i.srt) {
        StgClosure *srt = (StgClosure*)GET_FUN_SRT(fun_info);
        evacuate(&srt);
    }
}
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_gen_no is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen_no back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */

static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bool saved_eager_promotion;
  gen_workspace *ws;

  debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
             bd->start, bd->gen_no, bd->u.scan);

  gct->evac_gen_no = bd->gen_no;
  saved_eager_promotion = gct->eager_promotion;
  gct->failed_to_evac = false;

  ws = &gct->gens[bd->gen->no];

  p = bd->u.scan;

  // we might be evacuating into the very object that we're
  // scavenging, so we have to check the real bd->free pointer each
  // time around the loop.
  while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

      ASSERT(bd->link == NULL);
      ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
      info = get_itbl((StgClosure *)p);

      ASSERT(gct->thunk_selector_depth == 0);

      q = p;
      switch (info->type) {

      case MVAR_CLEAN:
      case MVAR_DIRTY:
      {
          StgMVar *mvar = ((StgMVar *)p);
          gct->eager_promotion = false;
          evacuate((StgClosure **)&mvar->head);
          evacuate((StgClosure **)&mvar->tail);
          evacuate((StgClosure **)&mvar->value);
          gct->eager_promotion = saved_eager_promotion;

          if (gct->failed_to_evac) {
              mvar->header.info = &stg_MVAR_DIRTY_info;
          } else {
              mvar->header.info = &stg_MVAR_CLEAN_info;
          }
          p += sizeofW(StgMVar);
          break;
      }

      case TVAR:
      {
          StgTVar *tvar = ((StgTVar *)p);
          gct->eager_promotion = false;
          evacuate((StgClosure **)&tvar->current_value);
          evacuate((StgClosure **)&tvar->first_watch_queue_entry);
          gct->eager_promotion = saved_eager_promotion;

          if (gct->failed_to_evac) {
              tvar->header.info = &stg_TVAR_DIRTY_info;
          } else {
              tvar->header.info = &stg_TVAR_CLEAN_info;
          }
          p += sizeofW(StgTVar);
          break;
      }

      case FUN_2_0:
          scavenge_fun_srt(info);
          evacuate(&((StgClosure *)p)->payload[1]);
          evacuate(&((StgClosure *)p)->payload[0]);
          p += sizeofW(StgHeader) + 2;
          break;

      case THUNK_2_0:
          scavenge_thunk_srt(info);
          evacuate(&((StgThunk *)p)->payload[1]);
          evacuate(&((StgThunk *)p)->payload[0]);
          p += sizeofW(StgThunk) + 2;
          break;

      case CONSTR_2_0:
          evacuate(&((StgClosure *)p)->payload[1]);
          evacuate(&((StgClosure *)p)->payload[0]);
          p += sizeofW(StgHeader) + 2;
          break;

      case THUNK_1_0:
          scavenge_thunk_srt(info);
          evacuate(&((StgThunk *)p)->payload[0]);
          p += sizeofW(StgThunk) + 1;
          break;

      case FUN_1_0:
          scavenge_fun_srt(info);
          FALLTHROUGH;
      case CONSTR_1_0:
          evacuate(&((StgClosure *)p)->payload[0]);
          p += sizeofW(StgHeader) + 1;
          break;

      case THUNK_0_1:
          scavenge_thunk_srt(info);
          p += sizeofW(StgThunk) + 1;
          break;

      case FUN_0_1:
          scavenge_fun_srt(info);
          FALLTHROUGH;
      case CONSTR_0_1:
          p += sizeofW(StgHeader) + 1;
          break;

      case THUNK_0_2:
          scavenge_thunk_srt(info);
          p += sizeofW(StgThunk) + 2;
          break;

      case FUN_0_2:
          scavenge_fun_srt(info);
          FALLTHROUGH;
      case CONSTR_0_2:
          p += sizeofW(StgHeader) + 2;
          break;

      case THUNK_1_1:
          scavenge_thunk_srt(info);
          evacuate(&((StgThunk *)p)->payload[0]);
          p += sizeofW(StgThunk) + 2;
          break;

      case FUN_1_1:
          scavenge_fun_srt(info);
          FALLTHROUGH;
      case CONSTR_1_1:
          evacuate(&((StgClosure *)p)->payload[0]);
          p += sizeofW(StgHeader) + 2;
          break;

      case FUN:
          scavenge_fun_srt(info);
          goto gen_obj;

      case THUNK:
      {
          StgPtr end;

          scavenge_thunk_srt(info);
          end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
          for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
              evacuate((StgClosure **)p);
          }
          p += info->layout.payload.nptrs;
          break;
      }

      gen_obj:
      case CONSTR:
      case CONSTR_NOCAF:
      case WEAK:
      case PRIM:
      {
          StgPtr end;

          end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
          for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
              evacuate((StgClosure **)p);
          }
          p += info->layout.payload.nptrs;
          break;
      }

      case BCO: {
          StgBCO *bco = (StgBCO *)p;
          evacuate((StgClosure **)&bco->instrs);
          evacuate((StgClosure **)&bco->literals);
          evacuate((StgClosure **)&bco->ptrs);
          p += bco_sizeW(bco);
          break;
      }

      case BLACKHOLE:
          evacuate(&((StgInd *)p)->indirectee);
          p += sizeofW(StgInd);
          break;

      case MUT_VAR_CLEAN:
      case MUT_VAR_DIRTY:
          gct->eager_promotion = false;
          evacuate(&((StgMutVar *)p)->var);
          gct->eager_promotion = saved_eager_promotion;

          if (gct->failed_to_evac) {
              ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
          } else {
              ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
          }
          p += sizeofW(StgMutVar);
          break;

      case BLOCKING_QUEUE:
      {
          StgBlockingQueue *bq = (StgBlockingQueue *)p;

          gct->eager_promotion = false;
          evacuate(&bq->bh);
          evacuate((StgClosure**)&bq->owner);
          evacuate((StgClosure**)&bq->queue);
          evacuate((StgClosure**)&bq->link);
          gct->eager_promotion = saved_eager_promotion;

          if (gct->failed_to_evac) {
              bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
          } else {
              bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
          }
          p += sizeofW(StgBlockingQueue);
          break;
      }

      case THUNK_SELECTOR:
      {
          StgSelector *s = (StgSelector *)p;
          evacuate(&s->selectee);
          p += THUNK_SELECTOR_sizeW();
          break;
      }

      // A chunk of stack saved in a heap object
      case AP_STACK:
      {
          StgAP_STACK *ap = (StgAP_STACK *)p;

          evacuate(&ap->fun);
          scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
          p = (StgPtr)ap->payload + ap->size;
          break;
      }

      case PAP:
          p = scavenge_PAP((StgPAP *)p);
          break;

      case AP:
          p = scavenge_AP((StgAP *)p);
          break;

      case ARR_WORDS:
          // nothing to follow
          p += arr_words_sizeW((StgArrBytes *)p);
          break;

      case MUT_ARR_PTRS_CLEAN:
      case MUT_ARR_PTRS_DIRTY:
      {
          // We don't eagerly promote objects pointed to by a mutable
          // array, but if we find the array only points to objects in
          // the same or an older generation, we mark it "clean" and
          // avoid traversing it during minor GCs.
          gct->eager_promotion = false;

          p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

          if (gct->failed_to_evac) {
              ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
          } else {
              ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
          }

          gct->eager_promotion = saved_eager_promotion;
          gct->failed_to_evac = true; // always put it on the mutable list.
          break;
      }

      case MUT_ARR_PTRS_FROZEN_CLEAN:
      case MUT_ARR_PTRS_FROZEN_DIRTY:
          // follow everything
      {
          p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

          if (gct->failed_to_evac) {
              ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
          } else {
              ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
          }
          break;
      }

      case SMALL_MUT_ARR_PTRS_CLEAN:
      case SMALL_MUT_ARR_PTRS_DIRTY:
          // follow everything
      {
          StgPtr next;

          // We don't eagerly promote objects pointed to by a mutable
          // array, but if we find the array only points to objects in
          // the same or an older generation, we mark it "clean" and
          // avoid traversing it during minor GCs.
          gct->eager_promotion = false;
          next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
          for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
              evacuate((StgClosure **)p);
          }
          gct->eager_promotion = saved_eager_promotion;

          if (gct->failed_to_evac) {
              ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
          } else {
              ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
          }

          gct->failed_to_evac = true; // always put it on the mutable list.
          break;
      }

      case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
      case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
          // follow everything
      {
          StgPtr next;

          next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
          for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
              evacuate((StgClosure **)p);
          }

          if (gct->failed_to_evac) {
              ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
          } else {
              ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
          }
          break;
      }

      case TSO:
      {
          scavengeTSO((StgTSO *)p);
          p += sizeofW(StgTSO);
          break;
      }

      case STACK:
      {
          StgStack *stack = (StgStack*)p;

          gct->eager_promotion = false;

          scavenge_stack(stack->sp, stack->stack + stack->stack_size);
          stack->dirty = gct->failed_to_evac;
          p += stack_sizeW(stack);

          gct->eager_promotion = saved_eager_promotion;
          break;
      }

      case MUT_PRIM:
      {
          StgPtr end;

          gct->eager_promotion = false;

          end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
          for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
              evacuate((StgClosure **)p);
          }
          p += info->layout.payload.nptrs;

          gct->eager_promotion = saved_eager_promotion;
          gct->failed_to_evac = true; // mutable
          break;
      }

      case TREC_CHUNK:
      {
          StgWord i;
          StgTRecChunk *tc = ((StgTRecChunk *) p);
          TRecEntry *e = &(tc -> entries[0]);
          gct->eager_promotion = false;
          evacuate((StgClosure **)&tc->prev_chunk);
          for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
              evacuate((StgClosure **)&e->tvar);
              evacuate((StgClosure **)&e->expected_value);
              evacuate((StgClosure **)&e->new_value);
          }
          gct->eager_promotion = saved_eager_promotion;
          gct->failed_to_evac = true; // mutable
          p += sizeofW(StgTRecChunk);
          break;
      }

      default:
          barf("scavenge: unimplemented/strange closure type %d @ %p",
               info->type, p);
      }

      /*
       * We need to record the current object on the mutable list if
       *  (a) It is actually mutable, or
       *  (b) It contains pointers to a younger generation.
       * Case (b) arises if we didn't manage to promote everything that
       * the current object points to into the current generation.
       */
      if (gct->failed_to_evac) {
          gct->failed_to_evac = false;
          if (bd->gen_no > 0) {
              recordMutableGen_GC((StgClosure *)q, bd->gen_no);
          }
      }
  }

  if (p > bd->free) {
      gct->copied += ws->todo_free - bd->free;
      bd->free = p;
  }

  debugTrace(DEBUG_gc, "   scavenged %ld bytes",
             (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

  // update stats: this is a block that has been scavenged
  gct->scanned += bd->free - bd->u.scan;
  bd->u.scan = bd->free;

  if (bd != ws->todo_bd) {
      // we're not going to evac any more objects into
      // this block, so push it now.
      push_scanned_block(bd, ws);
  }
}
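/* The dirty/clean flipping performed case-by-case above follows one pattern;
 * here is a condensed sketch of it (illustration only, kept under "#if 0"),
 * using a MUT_VAR as the example object.  The only assumption is a gc_thread
 * in scope as gct, as everywhere else in this file; the helper name is
 * hypothetical.
 */
#if 0
static void example_scavenge_mutable_field(StgMutVar *mv)
{
    bool saved_eager_promotion = gct->eager_promotion;

    // Don't eagerly promote whatever a mutable object points to ...
    gct->eager_promotion = false;
    evacuate(&mv->var);
    gct->eager_promotion = saved_eager_promotion;

    // ... then record whether the object still points into a younger
    // generation: if so it stays DIRTY and will be revisited via the
    // mutable list, otherwise it can be marked CLEAN.
    if (gct->failed_to_evac) {
        mv->header.info = &stg_MUT_VAR_DIRTY_info;
    } else {
        mv->header.info = &stg_MUT_VAR_CLEAN_info;
    }
}
#endif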
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */

static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    const StgInfoTable *info;
    bool saved_eager_promotion;

    gct->evac_gen_no = oldest_gen->no;
    saved_eager_promotion = gct->eager_promotion;

    while ((p = pop_mark_stack())) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {

        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = false;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *)p);
            gct->eager_promotion = false;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                tvar->header.info = &stg_TVAR_DIRTY_info;
            } else {
                tvar->header.info = &stg_TVAR_CLEAN_info;
            }
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case CONSTR_NOCAF:
        case WEAK:
        case PRIM:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }

        case IND:
        case BLACKHOLE:
            evacuate(&((StgInd *)p)->indirectee);
            break;

        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            gct->eager_promotion = false;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }

        case BLOCKING_QUEUE:
        {
            StgBlockingQueue *bq = (StgBlockingQueue *)p;

            gct->eager_promotion = false;
            evacuate(&bq->bh);
            evacuate((StgClosure**)&bq->owner);
            evacuate((StgClosure**)&bq->queue);
            evacuate((StgClosure**)&bq->link);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            } else {
                bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
            }
            break;
        }

        case ARR_WORDS:
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = false;

            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = true; // mutable anyhow.
            break;
        }

        case MUT_ARR_PTRS_FROZEN_CLEAN:
        case MUT_ARR_PTRS_FROZEN_DIRTY:
            // follow everything
        {
            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
            }
            break;
        }

        case SMALL_MUT_ARR_PTRS_CLEAN:
        case SMALL_MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            bool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = false;
            next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
            for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = true; // mutable anyhow.
            break;
        }

        case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
        case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
            // follow everything
        {
            StgPtr next;

            next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
            for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
            }
            break;
        }

        case TSO:
            scavengeTSO((StgTSO*)p);
            break;

        case STACK:
        {
            StgStack *stack = (StgStack*)p;

            gct->eager_promotion = false;

            scavenge_stack(stack->sp, stack->stack + stack->stack_size);
            stack->dirty = gct->failed_to_evac;

            gct->eager_promotion = saved_eager_promotion;
            break;
        }

        case MUT_PRIM:
        {
            StgPtr end;

            gct->eager_promotion = false;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = true; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->eager_promotion = false;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = true; // mutable
            break;
        }

        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = false;
            if (gct->evac_gen_no) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
            }
        }
    } // while (p = pop_mark_stack())
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */

static bool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    bool no_luck;
    bool saved_eager_promotion;

    saved_eager_promotion = gct->eager_promotion;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = false;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *)p);
        gct->eager_promotion = false;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            tvar->header.info = &stg_TVAR_DIRTY_info;
        } else {
            tvar->header.info = &stg_TVAR_CLEAN_info;
        }
        break;
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case FUN:
    case FUN_1_0:                       // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_NOCAF:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case PRIM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case WEAK:
        // This WEAK object will not be considered by tidyWeakList during this
        // collection because it is in a generation > N, but it is on the
        // mutable list so we must evacuate all of its pointers because some
        // of them may point into a younger generation.
        scavengeLiveWeak((StgWeak *)p);
        break;

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;

        gct->eager_promotion = false;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        gct->eager_promotion = false;
        evacuate(&bq->bh);
        evacuate((StgClosure**)&bq->owner);
        evacuate((StgClosure**)&bq->queue);
        evacuate((StgClosure**)&bq->link);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
        } else {
            bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
        }
        break;
    }

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = false;

        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = true;
        break;
    }

    case MUT_ARR_PTRS_FROZEN_CLEAN:
    case MUT_ARR_PTRS_FROZEN_DIRTY:
    {
        // follow everything
        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
        }
        break;
    }

    case SMALL_MUT_ARR_PTRS_CLEAN:
    case SMALL_MUT_ARR_PTRS_DIRTY:
    {
        StgPtr next, q;
        bool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = false;

        q = p;
        next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
        for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = true;
        break;
    }

    case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
    case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
    {
        // follow everything
        StgPtr next, q = p;

        next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
        for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
        }
        break;
    }

    case TSO:
        scavengeTSO((StgTSO*)p);
        break;

    case STACK:
    {
        StgStack *stack = (StgStack*)p;

        gct->eager_promotion = false;

        scavenge_stack(stack->sp, stack->stack + stack->stack_size);
        stack->dirty = gct->failed_to_evac;

        gct->eager_promotion = saved_eager_promotion;
        break;
    }

    case MUT_PRIM:
    {
        StgPtr end;

        gct->eager_promotion = false;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = true; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->eager_promotion = false;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = true; // mutable
        break;
    }

    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated.  See #3424.
    case BLACKHOLE:
    case IND_STATIC:
        evacuate(&((StgInd *)p)->indirectee);

#if 0 && defined(DEBUG)
      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
        StgPtr start = gen->scan;
        bdescr *start_bd = gen->scan_bd;
        StgWord size = 0;
        scavenge(&gen);
        if (start_bd != gen->scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
          size += gen->scan -
            (P_)BLOCK_ROUND_DOWN(gen->scan);
        } else {
          size = gen->scan - start;
        }
        debugBelch("evac IND: %ld bytes", size * sizeof(W_));
      }
#endif
        break;

    case COMPACT_NFDATA:
        scavenge_compact((StgCompactNFData*)p);
        break;

    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = false;
    return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
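/* A condensed sketch (illustration only, kept under "#if 0") of the three
 * outcomes for a single mutable-list entry, as implemented by
 * scavenge_mutable_list below: keep a clean array on the list untouched,
 * scavenge a dirty array by cards, or fall back to scavenge_one() and
 * re-record the object only if it still points into a younger generation.
 * The helper name example_mut_list_entry is hypothetical.
 */
#if 0
static void example_mut_list_entry(StgClosure *c, uint32_t gen_no)
{
    switch (get_itbl(c)->type) {
    case MUT_ARR_PTRS_CLEAN:
        recordMutableGen_GC(c, gen_no);       // stays on the list, no scan
        break;
    case MUT_ARR_PTRS_DIRTY:
        scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)c);
        recordMutableGen_GC(c, gen_no);       // always stays on the list
        break;
    default:
        if (scavenge_one((StgPtr)c)) {        // true => still has young pointers
            recordMutableGen_GC(c, gen_no);
        }
        break;
    }
}
#endif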
static void
scavenge_mutable_list(bdescr *bd, generation *gen)
{
    StgPtr p, q;

    uint32_t gen_no = gen->no;
    gct->evac_gen_no = gen_no;

    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {

            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#if defined(DEBUG)
            {
                const StgInfoTable *pinfo;
                switch (get_itbl((StgClosure *)p)->type) {
                case MUT_VAR_CLEAN:
                    // can happen due to concurrent writeMutVars
                case MUT_VAR_DIRTY:
                    mutlist_MUTVARS++; break;
                case MUT_ARR_PTRS_CLEAN:
                case MUT_ARR_PTRS_DIRTY:
                case MUT_ARR_PTRS_FROZEN_CLEAN:
                case MUT_ARR_PTRS_FROZEN_DIRTY:
                    mutlist_MUTARRS++; break;
                case MVAR_CLEAN:
                    barf("MVAR_CLEAN on mutable list");
                case MVAR_DIRTY:
                    mutlist_MVARS++; break;
                case TVAR:
                    mutlist_TVAR++; break;
                case TREC_CHUNK:
                    mutlist_TREC_CHUNK++; break;
                case MUT_PRIM:
                    pinfo = ((StgClosure*)p)->header.info;
                    if (pinfo == &stg_TVAR_WATCH_QUEUE_info)
                        mutlist_TVAR_WATCH_QUEUE++;
                    else if (pinfo == &stg_TREC_HEADER_info)
                        mutlist_TREC_HEADER++;
                    else
                        mutlist_OTHERS++;
                    break;
                default:
                    mutlist_OTHERS++; break;
                }
            }
#endif

            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS
            // are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
            case SMALL_MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen_no);
                continue;
            case MUT_ARR_PTRS_DIRTY:
            {
                bool saved_eager_promotion;
                saved_eager_promotion = gct->eager_promotion;
                gct->eager_promotion = false;

                scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);

                if (gct->failed_to_evac) {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
                } else {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
                }

                gct->eager_promotion = saved_eager_promotion;
                gct->failed_to_evac = false;
                recordMutableGen_GC((StgClosure *)p,gen_no);
                continue;
            }
            default:
                ;
            }

            if (RtsFlags.GcFlags.useNonmoving && major_gc && gen == oldest_gen) {
                // We can't use scavenge_one here as we need to scavenge SRTs
                nonmovingScavengeOne((StgClosure *)p);
            } else if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen_no);
            }
        }
    }
}
void
scavenge_capability_mut_lists (Capability *cap)
{
    // In a major GC, only the non-moving heap's mut list is a root.
    if (RtsFlags.GcFlags.useNonmoving && major_gc) {
        uint32_t g = oldest_gen->no;
        scavenge_mutable_list(cap->saved_mut_lists[g], oldest_gen);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
        return;
    }

    /* Mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (uint32_t g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
    }
}
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   Static objects that have been evacuated so far are kept on the linked
   list gct->static_objects; we scavenge each one in turn, moving it onto
   gct->scavenged_static_objects, until the list is empty.
   -------------------------------------------------------------------------- */
static void
scavenge_static(void)
{
  StgClosure *flagged_p, *p;
  const StgInfoTable *info;

  debugTrace(DEBUG_gc, "scavenging static objects");

  /* Always evacuate straight to the oldest generation for static
   * objects */
  gct->evac_gen_no = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
     list... */

  while (1) {

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list after each evacuation...
     * (static_objects is a global)
     */
    flagged_p = gct->static_objects;
    if (flagged_p == END_OF_STATIC_OBJECT_LIST) {
        break;
    }
    p = UNTAG_STATIC_LIST_PTR(flagged_p);

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl(p);
    // make sure the info pointer is into text space

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    gct->static_objects = *STATIC_LINK(info,p);
    *STATIC_LINK(info,p) = gct->scavenged_static_objects;
    gct->scavenged_static_objects = flagged_p;

    switch (info -> type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        evacuate(&ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list of the oldest generation.  We
         * leave it *on* the scavenged_static_objects list, though,
         * in case we visit this object again.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = false;
            recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
        }
        break;
      }

    case THUNK_STATIC:
      scavenge_thunk_srt(info);
      break;

    case FUN_STATIC:
      scavenge_fun_srt(info);
      FALLTHROUGH;

      // a FUN_STATIC can also be an SRT, so it may have pointer
      // fields.  See Note [SRTs] in CmmBuildInfoTables, specifically
      // the [FUN] optimisation.

    case CONSTR:
    case CONSTR_NOCAF:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_2_0:
    case CONSTR_1_1:
    case CONSTR_0_2:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        // evacuate the pointers
        for (q = (P_)p->payload; q < next; q++) {
            evacuate((StgClosure **)q);
        }
        break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }

    ASSERT(gct->failed_to_evac == false);
  }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */

static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
{
    walk_large_bitmap(do_evacuate, (StgClosure **) p, large_bitmap, size, NULL);
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  const StgRetInfoTable* info;
  StgWord bitmap;
  StgWord size;

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with an activation record.
   */

  while (p < stack_end) {
    info  = get_ret_itbl((StgClosure *)p);

    switch (info->i.type) {

    case UPDATE_FRAME:
        // Note [upd-black-hole]
        //
        // In SMP, we can get update frames that point to indirections
        // when two threads evaluate the same thunk.  We do attempt to
        // discover this situation in threadPaused(), but it's
        // possible that the following sequence occurs:
        //
        //        A             B
        //                  enter T
        //     enter T
        //     blackhole T
        //                  update T
        //     GC
        //
        // Now T is an indirection, and the update frame is already
        // marked on A's stack, so we won't traverse it again in
        // threadPaused().  We could traverse the whole stack again
        // before GC, but that would be too expensive.
        //
        // Scavenging this update frame as normal would be disastrous;
        // the indirection will be shorted out, and the updatee would
        // end up pointing to the value.  The update code will then
        // overwrite the value, instead of the BLACKHOLE it is
        // expecting to write to.
        //
        // One way we could try to fix this is to detect when the
        // BLACKHOLE has been updated by another thread, and then
        // replace this update frame with a special frame that just
        // enters the value.  But this introduces some other
        // complexities:
        //
        // - we must be careful to call checkBlockingQueues() in this
        //   special frame, because we might otherwise miss wakeups
        //   for threads that blocked on the original BLACKHOLE,
        // - we must spot this frame when we're stripping the stack in
        //   raiseAsync() and raiseExceptionHelper(), and arrange to call
        //   checkBlockingQueues() there too.
        //
        // This is hard to get right, indeed we previously got it
        // wrong (see #13751).  So we now take a different approach:
        // always copy the BLACKHOLE, even if it is actually an
        // indirection.  This way we keep the update frame, we're
        // guaranteed to still perform the update, and check for
        // missed wakeups even when stripping the stack in
        // raiseAsync() and raiseExceptionHelper().  This is also a
        // little more efficient, because evacuating a known BLACKHOLE
        // is faster than evacuating an unknown closure.
        //
        // NOTE: for the reasons above, blackholing (either lazy or
        // eager) is NOT optional.  See also Note [avoiding
        // threadPaused] in Interpreter.c.
        //
        // There are a couple of alternative solutions:
        // - if we see an update frame that points to an indirection,
        //   arrange to call checkBlockingQueues() on that thread
        //   after GC.
        // - spot a BLOCKING_QUEUE that points to a value and
        //   arrange to wake it up after the GC.
        //
        // These are more difficult to implement, requiring an extra
        // list to be maintained during GC.  They also rely on more
        // subtle invariants than the solution implemented here.
        //

    {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;

        evacuate_BLACKHOLE(&frame->updatee);
        p += sizeofW(StgUpdateFrame);
        continue;
    }

      // small bitmap (< 32 entries, or 64 on a 64-bit machine)
    case CATCH_STM_FRAME:
    case CATCH_RETRY_FRAME:
    case ATOMICALLY_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case RET_SMALL:
        bitmap = BITMAP_BITS(info->i.layout.bitmap);
        size   = BITMAP_SIZE(info->i.layout.bitmap);
        // NOTE: the payload starts immediately after the info-ptr, we
        // don't have an StgHeader in the same sense as a heap closure.
        p++;
        p = scavenge_small_bitmap(p, size, bitmap);

    follow_srt:
        if (major_gc && info->i.srt) {
            StgClosure *srt = (StgClosure*)GET_SRT(info);
            evacuate(&srt);
        }
        continue;

    case RET_BCO:
    {
        StgBCO *bco;

        p++;
        evacuate((StgClosure **)p);
        bco = (StgBCO *)*p;
        p++;
        size = BCO_BITMAP_SIZE(bco);
        scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
        p += size;
        continue;
    }

      // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
    case RET_BIG:
    {
        size = GET_LARGE_BITMAP(&info->i)->size;
        p++;
        scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
        p += size;
        // and don't forget to follow the SRT
        goto follow_srt;
    }

    case RET_FUN:
    {
        StgRetFun *ret_fun = (StgRetFun *)p;
        const StgFunInfoTable *fun_info;

        evacuate(&ret_fun->fun);
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        p = scavenge_arg_block(fun_info, ret_fun->payload);
        goto follow_srt;
    }

    default:
        barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
    }
  }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */

static void
scavenge_large (gen_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_gen_no = ws->gen->no;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat todo_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->gen->sync);
        if (bd->flags & BF_COMPACT) {
            dbl_link_onto(bd, &ws->gen->live_compact_objects);
            StgCompactNFData *str = ((StgCompactNFDataBlock*)bd->start)->owner;
            ws->gen->n_live_compact_blocks += str->totalW / BLOCK_SIZE_W;
            p = (StgPtr)str;
        } else {
            dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
            ws->gen->n_scavenged_large_blocks += bd->blocks;
            p = bd->start;
        }
        RELEASE_SPIN_LOCK(&ws->gen->sync);

        if (scavenge_one(p)) {
            if (ws->gen->no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->gen->no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure *)p);
    }
}
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest gen that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
static bool
scavenge_find_work (void)
{
    int g;
    gen_workspace *ws;
    bool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = false;

loop:
    did_something = false;
    for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
        ws = &gct->gens[g];

        if (ws->todo_seg != END_NONMOVING_TODO_LIST) {
            struct NonmovingSegment *seg = ws->todo_seg;
            ASSERT(seg->todo_link);
            ws->todo_seg = seg->todo_link;
            seg->todo_link = NULL;
            scavengeNonmovingSegment(seg);
            did_something = true;
            break;
        }

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = true;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = true;
            break;
        }

        if ((bd = grab_local_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = true;
            break;
        }
    }

    if (did_something) {
        did_anything = true;
        goto loop;
    }

#if defined(THREADED_RTS)
    if (work_stealing) {
        // look for work to steal
        for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
            if ((bd = steal_todo_block(g)) != NULL) {
                scavenge_block(bd);
                did_something = true;
                break;
            }
        }

        if (did_something) {
            did_anything = true;
            goto loop;
        }
    }
#endif

    // only return when there is no more work to do

    return did_anything;
}
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    bool work_to_do;

loop:
    work_to_do = false;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_OBJECT_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bd != NULL && !mark_stack_empty()) {
        scavenge_mark_stack();
        work_to_do = true;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}