1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * https://gitlab.haskell.org/ghc/ghc/wikis/commentary/rts/storage/gc
11 *
12 * ---------------------------------------------------------------------------*/
13
14 /* ----------------------------------------------------------------------------
15 We have two main scavenge functions:
16
17 - scavenge_block(bdescr *bd)
18 - scavenge_one(StgPtr p)
19
20 As the names and parameters suggest, the first one scavenges a whole block
21 while the second scavenges only one object. That is not the only difference,
22 however: scavenge_block scavenges all SRTs, while scavenge_one only scavenges
23 the SRTs of stack frames. The reason is that scavenge_one is called in two
24 cases:
25
26 - When scavenging a mut_list
27 - When scavenging a large object
28
29 We don't have to scavenge SRTs when scavenging a mut_list, because we only
30 scavenge mut_lists in minor GCs, and static objects are only collected in
31 major GCs.
32
33 However, because scavenge_one is also used to scavenge large objects (which
34 are scavenged even in major GCs), we need to deal with SRTs of large
35 objects. We never allocate large FUNs and THUNKs, but we allocate large
36 STACKs (e.g. in threadStackOverflow), and stack frames can have SRTs. So
37 scavenge_one skips FUN and THUNK SRTs but scavenges stack frame SRTs.
38
39 In summary, in a major GC:
40
41 - scavenge_block() scavenges all SRTs
42 - scavenge_one() scavenges only stack frame SRTs
43 ------------------------------------------------------------------------- */
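/* For orientation, a sketch of the call structure within this file (all of
   the functions named here are defined below):

       scavenge_loop()
         -> scavenge_static()                    (major GC only)
         -> scavenge_mark_stack()                (compacted generation)
         -> scavenge_find_work() -> scavenge_block(), scavenge_large()
       scavenge_capability_mut_lists() -> scavenge_mutable_list() -> scavenge_one()
       scavenge_large() -> scavenge_one()

   i.e. scavenge_one() is only reached via mut_lists and large/compact
   objects, which is what justifies the SRT policy described above.
*/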
44
45 #include "PosixSource.h"
46 #include "Rts.h"
47
48 #include "Storage.h"
49 #include "GC.h"
50 #include "GCThread.h"
51 #include "GCUtils.h"
52 #include "Compact.h"
53 #include "MarkStack.h"
54 #include "Evac.h"
55 #include "Scav.h"
56 #include "Apply.h"
57 #include "Trace.h"
58 #include "Sanity.h"
59 #include "Capability.h"
60 #include "LdvProfile.h"
61 #include "HeapUtils.h"
62 #include "Hash.h"
63
64 #include "sm/MarkWeak.h"
65
66 static void scavenge_stack (StgPtr p, StgPtr stack_end);
67
68 static void scavenge_large_bitmap (StgPtr p,
69 StgLargeBitmap *large_bitmap,
70 StgWord size );
71
72 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
73 # define evacuate(a) evacuate1(a)
74 # define evacuate_BLACKHOLE(a) evacuate_BLACKHOLE1(a)
75 # define scavenge_loop(a) scavenge_loop1(a)
76 # define scavenge_block(a) scavenge_block1(a)
77 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
78 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
79 #endif
80
81 static void do_evacuate(StgClosure **p, void *user STG_UNUSED)
82 {
83 evacuate(p);
84 }
85
86 /* -----------------------------------------------------------------------------
87 Scavenge a TSO.
88 -------------------------------------------------------------------------- */
89
90 static void
91 scavengeTSO (StgTSO *tso)
92 {
93 bool saved_eager;
94
95 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
96
97 // update the pointer from the InCall.
98 if (tso->bound != NULL) {
99 // NB. We can't just set tso->bound->tso = tso, because this
100 // might be an invalid copy of the TSO resulting from multiple
101 // threads evacuating the TSO simultaneously (see
102 // Evac.c:copy_tag()). Calling evacuate() on this pointer
103 // will ensure that we update it to point to the correct copy.
104 evacuate((StgClosure **)&tso->bound->tso);
105 }
106
107 saved_eager = gct->eager_promotion;
108 gct->eager_promotion = false;
109
110 evacuate((StgClosure **)&tso->blocked_exceptions);
111 evacuate((StgClosure **)&tso->bq);
112
113 // scavenge the current transaction record
114 evacuate((StgClosure **)&tso->trec);
115
116 evacuate((StgClosure **)&tso->stackobj);
117
118 evacuate((StgClosure **)&tso->_link);
119 if ( tso->why_blocked == BlockedOnMVar
120 || tso->why_blocked == BlockedOnMVarRead
121 || tso->why_blocked == BlockedOnBlackHole
122 || tso->why_blocked == BlockedOnMsgThrowTo
123 || tso->why_blocked == NotBlocked
124 ) {
125 evacuate(&tso->block_info.closure);
126 }
127 #if defined(THREADED_RTS)
128 // in the THREADED_RTS, block_info.closure must always point to a
129 // valid closure, because we assume this in throwTo(). In the
130 // non-threaded RTS it might be a FD (for
131 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
132 else {
133 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
134 }
135 #endif
136
137 tso->dirty = gct->failed_to_evac;
138
139 gct->eager_promotion = saved_eager;
140 }
141
142 /* ----------------------------------------------------------------------------
143 Scavenging compact objects
144 ------------------------------------------------------------------------- */
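/* Roughly: the keys of str->hash are closure addresses (see
   evacuate_hash_entry below, which casts each key back to an StgClosure*).
   Since this GC may move those closures, the table cannot be updated in
   place; instead a fresh table keyed on the evacuated addresses is built
   and swapped in, and the old table is freed.
*/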
145
146 typedef struct {
147 // We must save gct when calling mapHashTable(), which is compiled
148 // without GCThread.h and so uses a different calling convention.
149 // See also GC.c:mark_root where we do a similar thing.
150 gc_thread *saved_gct;
151 HashTable *newHash;
152 } MapHashData;
153
154 static void
155 evacuate_hash_entry(MapHashData *dat, StgWord key, const void *value)
156 {
157 StgClosure *p = (StgClosure*)key;
158 #if defined(THREADED_RTS)
159 gc_thread *old_gct = gct;
160 #endif
161
162 SET_GCT(dat->saved_gct);
163 evacuate(&p);
164 insertHashTable(dat->newHash, (StgWord)p, value);
165 SET_GCT(old_gct);
166 }
167
168 static void
169 scavenge_compact(StgCompactNFData *str)
170 {
171 bool saved_eager;
172 saved_eager = gct->eager_promotion;
173 gct->eager_promotion = false;
174
175 if (str->hash) {
176 MapHashData dat;
177 dat.saved_gct = gct;
178 HashTable *newHash = allocHashTable();
179 dat.newHash = newHash;
180 mapHashTable(str->hash, (void*)&dat, (MapHashFn)evacuate_hash_entry);
181 freeHashTable(str->hash, NULL);
182 str->hash = newHash;
183 }
184
185 debugTrace(DEBUG_compact,
186 "compact alive @%p, gen %d, %" FMT_Word " bytes",
187 str, Bdescr((P_)str)->gen_no, str->totalW * sizeof(W_));
188
189 gct->eager_promotion = saved_eager;
190 if (gct->failed_to_evac) {
191 ((StgClosure *)str)->header.info = &stg_COMPACT_NFDATA_DIRTY_info;
192 } else {
193 ((StgClosure *)str)->header.info = &stg_COMPACT_NFDATA_CLEAN_info;
194 }
195 }
196
197 /* -----------------------------------------------------------------------------
198 Mutable arrays of pointers
199 -------------------------------------------------------------------------- */
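/* Sketch: a MUT_ARR_PTRS payload is divided into cards of
   2^MUT_ARR_PTRS_CARD_BITS elements, each with an entry in a card table
   kept alongside the array (accessed via mutArrPtrsCard(a,m)). A non-zero
   card entry means "this card may contain pointers into a younger
   generation" and must be re-scanned by a later GC. Illustratively, card m
   covers a->payload[m << MUT_ARR_PTRS_CARD_BITS] up to the next card
   boundary, or a->ptrs, whichever comes first. scavenge_mut_arr_ptrs()
   scans every element and (re)sets the cards; scavenge_mut_arr_ptrs_marked()
   scans only the cards already marked dirty.
*/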
200
201 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
202 {
203 W_ m;
204 bool any_failed;
205 StgPtr p, q;
206
207 any_failed = false;
208 p = (StgPtr)&a->payload[0];
209 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
210 {
211 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
212 for (; p < q; p++) {
213 evacuate((StgClosure**)p);
214 }
215 if (gct->failed_to_evac) {
216 any_failed = true;
217 *mutArrPtrsCard(a,m) = 1;
218 gct->failed_to_evac = false;
219 } else {
220 *mutArrPtrsCard(a,m) = 0;
221 }
222 }
223
224 q = (StgPtr)&a->payload[a->ptrs];
225 if (p < q) {
226 for (; p < q; p++) {
227 evacuate((StgClosure**)p);
228 }
229 if (gct->failed_to_evac) {
230 any_failed = true;
231 *mutArrPtrsCard(a,m) = 1;
232 gct->failed_to_evac = false;
233 } else {
234 *mutArrPtrsCard(a,m) = 0;
235 }
236 }
237
238 gct->failed_to_evac = any_failed;
239 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
240 }
241
242 // scavenge only the marked areas of a MUT_ARR_PTRS
243 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
244 {
245 W_ m;
246 StgPtr p, q;
247 bool any_failed;
248
249 any_failed = false;
250 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
251 {
252 if (*mutArrPtrsCard(a,m) != 0) {
253 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
254 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
255 (StgPtr)&a->payload[a->ptrs]);
256 for (; p < q; p++) {
257 evacuate((StgClosure**)p);
258 }
259 if (gct->failed_to_evac) {
260 any_failed = true;
261 gct->failed_to_evac = false;
262 } else {
263 *mutArrPtrsCard(a,m) = 0;
264 }
265 }
266 }
267
268 gct->failed_to_evac = any_failed;
269 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
270 }
271
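/* A small bitmap describes a group of stack or argument words in a single
   StgWord: bit i is 0 if word i is a heap pointer that must be evacuated,
   and 1 if it is a non-pointer word, as the (bitmap & 1) == 0 test below
   shows. For example (illustrative only), with size = 3 and bitmap = 0b010,
   words 0 and 2 are pointers and word 1 is not.
*/
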
272 STATIC_INLINE StgPtr
273 scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
274 {
275 while (size > 0) {
276 if ((bitmap & 1) == 0) {
277 evacuate((StgClosure **)p);
278 }
279 p++;
280 bitmap = bitmap >> 1;
281 size--;
282 }
283 return p;
284 }
285
286 /* -----------------------------------------------------------------------------
287 Blocks of function args occur on the stack (at the top) and
288 in PAPs.
289 -------------------------------------------------------------------------- */
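/* In outline, the layout of an argument block is found from the function's
   info table: ARG_GEN functions carry a small bitmap directly in the info
   table, ARG_GEN_BIG functions point to an StgLargeBitmap, and the
   remaining standard calling patterns are looked up in stg_arg_bitmaps[]
   by fun_type. scavenge_arg_block() below dispatches on exactly this.
*/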
290
291 STATIC_INLINE StgPtr
292 scavenge_arg_block (const StgFunInfoTable *fun_info, StgClosure **args)
293 {
294 StgPtr p;
295 StgWord bitmap;
296 StgWord size;
297
298 p = (StgPtr)args;
299 switch (fun_info->f.fun_type) {
300 case ARG_GEN:
301 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
302 size = BITMAP_SIZE(fun_info->f.b.bitmap);
303 goto small_bitmap;
304 case ARG_GEN_BIG:
305 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
306 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
307 p += size;
308 break;
309 default:
310 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
311 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
312 small_bitmap:
313 p = scavenge_small_bitmap(p, size, bitmap);
314 break;
315 }
316 return p;
317 }
318
319 STATIC_INLINE GNUC_ATTR_HOT StgPtr
320 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
321 {
322 StgPtr p;
323 StgWord bitmap;
324 const StgFunInfoTable *fun_info;
325
326 fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(fun));
327 ASSERT(fun_info->i.type != PAP);
328 p = (StgPtr)payload;
329
330 switch (fun_info->f.fun_type) {
331 case ARG_GEN:
332 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
333 goto small_bitmap;
334 case ARG_GEN_BIG:
335 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
336 p += size;
337 break;
338 case ARG_BCO:
339 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
340 p += size;
341 break;
342 default:
343 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
344 small_bitmap:
345 p = scavenge_small_bitmap(p, size, bitmap);
346 break;
347 }
348 return p;
349 }
350
351 STATIC_INLINE GNUC_ATTR_HOT StgPtr
352 scavenge_PAP (StgPAP *pap)
353 {
354 evacuate(&pap->fun);
355 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
356 }
357
358 STATIC_INLINE StgPtr
359 scavenge_AP (StgAP *ap)
360 {
361 evacuate(&ap->fun);
362 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
363 }
364
365 /* -----------------------------------------------------------------------------
366 Scavenge SRTs
367 -------------------------------------------------------------------------- */
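/* SRTs only reference static closures, and static closures are only
   collected in a major GC, so both functions below return immediately when
   !major_gc. An SRT is reached here as a single static closure obtained
   from the info table (GET_SRT / GET_FUN_SRT), hence the single evacuate()
   of a local StgClosure* in each case.
*/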
368
369 STATIC_INLINE GNUC_ATTR_HOT void
370 scavenge_thunk_srt(const StgInfoTable *info)
371 {
372 StgThunkInfoTable *thunk_info;
373
374 if (!major_gc) return;
375
376 thunk_info = itbl_to_thunk_itbl(info);
377 if (thunk_info->i.srt) {
378 StgClosure *srt = (StgClosure*)GET_SRT(thunk_info);
379 evacuate(&srt);
380 }
381 }
382
383 STATIC_INLINE GNUC_ATTR_HOT void
384 scavenge_fun_srt(const StgInfoTable *info)
385 {
386 StgFunInfoTable *fun_info;
387
388 if (!major_gc) return;
389
390 fun_info = itbl_to_fun_itbl(info);
391 if (fun_info->i.srt) {
392 StgClosure *srt = (StgClosure*)GET_FUN_SRT(fun_info);
393 evacuate(&srt);
394 }
395 }
396
397 /* -----------------------------------------------------------------------------
398 Scavenge a block from the given scan pointer up to bd->free.
399
400 evac_gen_no is set by the caller to be either zero (for a step in a
401 generation < N) or G where G is the generation of the step being
402 scavenged.
403
404 We sometimes temporarily change evac_gen_no back to zero if we're
405 scavenging a mutable object where eager promotion isn't such a good
406 idea.
407 -------------------------------------------------------------------------- */
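/* The pattern used repeatedly for mutable closures in the loop below is,
   in sketch form:

       gct->eager_promotion = false;
       evacuate(&obj->field);            // one call per mutable field
       gct->eager_promotion = saved_eager_promotion;
       if (gct->failed_to_evac) {
           obj->header.info = &stg_FOO_DIRTY_info;   // may still point to a
       } else {                                      // younger generation
           obj->header.info = &stg_FOO_CLEAN_info;   // no young pointers
       }

   where FOO stands for the closure's own CLEAN/DIRTY info table pair
   (MUT_VAR, MVAR, TVAR, BLOCKING_QUEUE, ...). Objects left CLEAN can be
   skipped, or dropped from the mut_list, in later minor GCs.
*/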
408
409 static GNUC_ATTR_HOT void
410 scavenge_block (bdescr *bd)
411 {
412 StgPtr p, q;
413 const StgInfoTable *info;
414 bool saved_eager_promotion;
415 gen_workspace *ws;
416
417 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
418 bd->start, bd->gen_no, bd->u.scan);
419
420 gct->scan_bd = bd;
421 gct->evac_gen_no = bd->gen_no;
422 saved_eager_promotion = gct->eager_promotion;
423 gct->failed_to_evac = false;
424
425 ws = &gct->gens[bd->gen->no];
426
427 p = bd->u.scan;
428
429 // we might be evacuating into the very object that we're
430 // scavenging, so we have to check the real bd->free pointer each
431 // time around the loop.
432 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
433
434 ASSERT(bd->link == NULL);
435 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
436 info = get_itbl((StgClosure *)p);
437
438 ASSERT(gct->thunk_selector_depth == 0);
439
440 q = p;
441 switch (info->type) {
442
443 case MVAR_CLEAN:
444 case MVAR_DIRTY:
445 {
446 StgMVar *mvar = ((StgMVar *)p);
447 gct->eager_promotion = false;
448 evacuate((StgClosure **)&mvar->head);
449 evacuate((StgClosure **)&mvar->tail);
450 evacuate((StgClosure **)&mvar->value);
451 gct->eager_promotion = saved_eager_promotion;
452
453 if (gct->failed_to_evac) {
454 mvar->header.info = &stg_MVAR_DIRTY_info;
455 } else {
456 mvar->header.info = &stg_MVAR_CLEAN_info;
457 }
458 p += sizeofW(StgMVar);
459 break;
460 }
461
462 case TVAR:
463 {
464 StgTVar *tvar = ((StgTVar *)p);
465 gct->eager_promotion = false;
466 evacuate((StgClosure **)&tvar->current_value);
467 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
468 gct->eager_promotion = saved_eager_promotion;
469
470 if (gct->failed_to_evac) {
471 tvar->header.info = &stg_TVAR_DIRTY_info;
472 } else {
473 tvar->header.info = &stg_TVAR_CLEAN_info;
474 }
475 p += sizeofW(StgTVar);
476 break;
477 }
478
479 case FUN_2_0:
480 scavenge_fun_srt(info);
481 evacuate(&((StgClosure *)p)->payload[1]);
482 evacuate(&((StgClosure *)p)->payload[0]);
483 p += sizeofW(StgHeader) + 2;
484 break;
485
486 case THUNK_2_0:
487 scavenge_thunk_srt(info);
488 evacuate(&((StgThunk *)p)->payload[1]);
489 evacuate(&((StgThunk *)p)->payload[0]);
490 p += sizeofW(StgThunk) + 2;
491 break;
492
493 case CONSTR_2_0:
494 evacuate(&((StgClosure *)p)->payload[1]);
495 evacuate(&((StgClosure *)p)->payload[0]);
496 p += sizeofW(StgHeader) + 2;
497 break;
498
499 case THUNK_1_0:
500 scavenge_thunk_srt(info);
501 evacuate(&((StgThunk *)p)->payload[0]);
502 p += sizeofW(StgThunk) + 1;
503 break;
504
505 case FUN_1_0:
506 scavenge_fun_srt(info);
507 FALLTHROUGH;
508 case CONSTR_1_0:
509 evacuate(&((StgClosure *)p)->payload[0]);
510 p += sizeofW(StgHeader) + 1;
511 break;
512
513 case THUNK_0_1:
514 scavenge_thunk_srt(info);
515 p += sizeofW(StgThunk) + 1;
516 break;
517
518 case FUN_0_1:
519 scavenge_fun_srt(info);
520 FALLTHROUGH;
521 case CONSTR_0_1:
522 p += sizeofW(StgHeader) + 1;
523 break;
524
525 case THUNK_0_2:
526 scavenge_thunk_srt(info);
527 p += sizeofW(StgThunk) + 2;
528 break;
529
530 case FUN_0_2:
531 scavenge_fun_srt(info);
532 FALLTHROUGH;
533 case CONSTR_0_2:
534 p += sizeofW(StgHeader) + 2;
535 break;
536
537 case THUNK_1_1:
538 scavenge_thunk_srt(info);
539 evacuate(&((StgThunk *)p)->payload[0]);
540 p += sizeofW(StgThunk) + 2;
541 break;
542
543 case FUN_1_1:
544 scavenge_fun_srt(info);
545 FALLTHROUGH;
546 case CONSTR_1_1:
547 evacuate(&((StgClosure *)p)->payload[0]);
548 p += sizeofW(StgHeader) + 2;
549 break;
550
551 case FUN:
552 scavenge_fun_srt(info);
553 goto gen_obj;
554
555 case THUNK:
556 {
557 StgPtr end;
558
559 scavenge_thunk_srt(info);
560 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
561 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
562 evacuate((StgClosure **)p);
563 }
564 p += info->layout.payload.nptrs;
565 break;
566 }
567
568 gen_obj:
569 case CONSTR:
570 case CONSTR_NOCAF:
571 case WEAK:
572 case PRIM:
573 {
574 StgPtr end;
575
576 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
577 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
578 evacuate((StgClosure **)p);
579 }
580 p += info->layout.payload.nptrs;
581 break;
582 }
583
584 case BCO: {
585 StgBCO *bco = (StgBCO *)p;
586 evacuate((StgClosure **)&bco->instrs);
587 evacuate((StgClosure **)&bco->literals);
588 evacuate((StgClosure **)&bco->ptrs);
589 p += bco_sizeW(bco);
590 break;
591 }
592
593 case BLACKHOLE:
594 evacuate(&((StgInd *)p)->indirectee);
595 p += sizeofW(StgInd);
596 break;
597
598 case MUT_VAR_CLEAN:
599 case MUT_VAR_DIRTY:
600 gct->eager_promotion = false;
601 evacuate(&((StgMutVar *)p)->var);
602 gct->eager_promotion = saved_eager_promotion;
603
604 if (gct->failed_to_evac) {
605 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
606 } else {
607 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
608 }
609 p += sizeofW(StgMutVar);
610 break;
611
612 case BLOCKING_QUEUE:
613 {
614 StgBlockingQueue *bq = (StgBlockingQueue *)p;
615
616 gct->eager_promotion = false;
617 evacuate(&bq->bh);
618 evacuate((StgClosure**)&bq->owner);
619 evacuate((StgClosure**)&bq->queue);
620 evacuate((StgClosure**)&bq->link);
621 gct->eager_promotion = saved_eager_promotion;
622
623 if (gct->failed_to_evac) {
624 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
625 } else {
626 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
627 }
628 p += sizeofW(StgBlockingQueue);
629 break;
630 }
631
632 case THUNK_SELECTOR:
633 {
634 StgSelector *s = (StgSelector *)p;
635 evacuate(&s->selectee);
636 p += THUNK_SELECTOR_sizeW();
637 break;
638 }
639
640 // A chunk of stack saved in a heap object
641 case AP_STACK:
642 {
643 StgAP_STACK *ap = (StgAP_STACK *)p;
644
645 evacuate(&ap->fun);
646 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
647 p = (StgPtr)ap->payload + ap->size;
648 break;
649 }
650
651 case PAP:
652 p = scavenge_PAP((StgPAP *)p);
653 break;
654
655 case AP:
656 p = scavenge_AP((StgAP *)p);
657 break;
658
659 case ARR_WORDS:
660 // nothing to follow
661 p += arr_words_sizeW((StgArrBytes *)p);
662 break;
663
664 case MUT_ARR_PTRS_CLEAN:
665 case MUT_ARR_PTRS_DIRTY:
666 {
667 // We don't eagerly promote objects pointed to by a mutable
668 // array, but if we find the array only points to objects in
669 // the same or an older generation, we mark it "clean" and
670 // avoid traversing it during minor GCs.
671 gct->eager_promotion = false;
672
673 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
674
675 if (gct->failed_to_evac) {
676 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
677 } else {
678 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
679 }
680
681 gct->eager_promotion = saved_eager_promotion;
682 gct->failed_to_evac = true; // always put it on the mutable list.
683 break;
684 }
685
686 case MUT_ARR_PTRS_FROZEN_CLEAN:
687 case MUT_ARR_PTRS_FROZEN_DIRTY:
688 // follow everything
689 {
690 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
691
692 if (gct->failed_to_evac) {
693 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
694 } else {
695 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
696 }
697 break;
698 }
699
700 case SMALL_MUT_ARR_PTRS_CLEAN:
701 case SMALL_MUT_ARR_PTRS_DIRTY:
702 // follow everything
703 {
704 StgPtr next;
705
706 // We don't eagerly promote objects pointed to by a mutable
707 // array, but if we find the array only points to objects in
708 // the same or an older generation, we mark it "clean" and
709 // avoid traversing it during minor GCs.
710 gct->eager_promotion = false;
711 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
712 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
713 evacuate((StgClosure **)p);
714 }
715 gct->eager_promotion = saved_eager_promotion;
716
717 if (gct->failed_to_evac) {
718 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
719 } else {
720 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
721 }
722
723 gct->failed_to_evac = true; // always put it on the mutable list.
724 break;
725 }
726
727 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
728 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
729 // follow everything
730 {
731 StgPtr next;
732
733 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
734 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
735 evacuate((StgClosure **)p);
736 }
737
738 if (gct->failed_to_evac) {
739 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
740 } else {
741 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
742 }
743 break;
744 }
745
746 case TSO:
747 {
748 scavengeTSO((StgTSO *)p);
749 p += sizeofW(StgTSO);
750 break;
751 }
752
753 case STACK:
754 {
755 StgStack *stack = (StgStack*)p;
756
757 gct->eager_promotion = false;
758
759 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
760 stack->dirty = gct->failed_to_evac;
761 p += stack_sizeW(stack);
762
763 gct->eager_promotion = saved_eager_promotion;
764 break;
765 }
766
767 case MUT_PRIM:
768 {
769 StgPtr end;
770
771 gct->eager_promotion = false;
772
773 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
774 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
775 evacuate((StgClosure **)p);
776 }
777 p += info->layout.payload.nptrs;
778
779 gct->eager_promotion = saved_eager_promotion;
780 gct->failed_to_evac = true; // mutable
781 break;
782 }
783
784 case TREC_CHUNK:
785 {
786 StgWord i;
787 StgTRecChunk *tc = ((StgTRecChunk *) p);
788 TRecEntry *e = &(tc -> entries[0]);
789 gct->eager_promotion = false;
790 evacuate((StgClosure **)&tc->prev_chunk);
791 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
792 evacuate((StgClosure **)&e->tvar);
793 evacuate((StgClosure **)&e->expected_value);
794 evacuate((StgClosure **)&e->new_value);
795 }
796 gct->eager_promotion = saved_eager_promotion;
797 gct->failed_to_evac = true; // mutable
798 p += sizeofW(StgTRecChunk);
799 break;
800 }
801
802 default:
803 barf("scavenge: unimplemented/strange closure type %d @ %p",
804 info->type, p);
805 }
806
807 /*
808 * We need to record the current object on the mutable list if
809 * (a) It is actually mutable, or
810 * (b) It contains pointers to a younger generation.
811 * Case (b) arises if we didn't manage to promote everything that
812 * the current object points to into the current generation.
813 */
814 if (gct->failed_to_evac) {
815 gct->failed_to_evac = false;
816 if (bd->gen_no > 0) {
817 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
818 }
819 }
820 }
821
822 if (p > bd->free) {
823 gct->copied += ws->todo_free - bd->free;
824 bd->free = p;
825 }
826
827 debugTrace(DEBUG_gc, " scavenged %ld bytes",
828 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
829
830 // update stats: this is a block that has been scavenged
831 gct->scanned += bd->free - bd->u.scan;
832 bd->u.scan = bd->free;
833
834 if (bd != ws->todo_bd) {
835 // we're not going to evac any more objects into
836 // this block, so push it now.
837 push_scanned_block(bd, ws);
838 }
839
840 gct->scan_bd = NULL;
841 }
842 /* -----------------------------------------------------------------------------
843 Scavenge everything on the mark stack.
844
845 This is slightly different from scavenge_block():
846 - we don't walk linearly through the objects, so the scavenger
847 doesn't need to advance the pointer on to the next object.
848 -------------------------------------------------------------------------- */
849
850 static void
851 scavenge_mark_stack(void)
852 {
853 StgPtr p, q;
854 const StgInfoTable *info;
855 bool saved_eager_promotion;
856
857 gct->evac_gen_no = oldest_gen->no;
858 saved_eager_promotion = gct->eager_promotion;
859
860 while ((p = pop_mark_stack())) {
861
862 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
863 info = get_itbl((StgClosure *)p);
864
865 q = p;
866 switch (info->type) {
867
868 case MVAR_CLEAN:
869 case MVAR_DIRTY:
870 {
871 StgMVar *mvar = ((StgMVar *)p);
872 gct->eager_promotion = false;
873 evacuate((StgClosure **)&mvar->head);
874 evacuate((StgClosure **)&mvar->tail);
875 evacuate((StgClosure **)&mvar->value);
876 gct->eager_promotion = saved_eager_promotion;
877
878 if (gct->failed_to_evac) {
879 mvar->header.info = &stg_MVAR_DIRTY_info;
880 } else {
881 mvar->header.info = &stg_MVAR_CLEAN_info;
882 }
883 break;
884 }
885
886 case TVAR:
887 {
888 StgTVar *tvar = ((StgTVar *)p);
889 gct->eager_promotion = false;
890 evacuate((StgClosure **)&tvar->current_value);
891 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
892 gct->eager_promotion = saved_eager_promotion;
893
894 if (gct->failed_to_evac) {
895 tvar->header.info = &stg_TVAR_DIRTY_info;
896 } else {
897 tvar->header.info = &stg_TVAR_CLEAN_info;
898 }
899 break;
900 }
901
902 case FUN_2_0:
903 scavenge_fun_srt(info);
904 evacuate(&((StgClosure *)p)->payload[1]);
905 evacuate(&((StgClosure *)p)->payload[0]);
906 break;
907
908 case THUNK_2_0:
909 scavenge_thunk_srt(info);
910 evacuate(&((StgThunk *)p)->payload[1]);
911 evacuate(&((StgThunk *)p)->payload[0]);
912 break;
913
914 case CONSTR_2_0:
915 evacuate(&((StgClosure *)p)->payload[1]);
916 evacuate(&((StgClosure *)p)->payload[0]);
917 break;
918
919 case FUN_1_0:
920 case FUN_1_1:
921 scavenge_fun_srt(info);
922 evacuate(&((StgClosure *)p)->payload[0]);
923 break;
924
925 case THUNK_1_0:
926 case THUNK_1_1:
927 scavenge_thunk_srt(info);
928 evacuate(&((StgThunk *)p)->payload[0]);
929 break;
930
931 case CONSTR_1_0:
932 case CONSTR_1_1:
933 evacuate(&((StgClosure *)p)->payload[0]);
934 break;
935
936 case FUN_0_1:
937 case FUN_0_2:
938 scavenge_fun_srt(info);
939 break;
940
941 case THUNK_0_1:
942 case THUNK_0_2:
943 scavenge_thunk_srt(info);
944 break;
945
946 case CONSTR_0_1:
947 case CONSTR_0_2:
948 break;
949
950 case FUN:
951 scavenge_fun_srt(info);
952 goto gen_obj;
953
954 case THUNK:
955 {
956 StgPtr end;
957
958 scavenge_thunk_srt(info);
959 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
960 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
961 evacuate((StgClosure **)p);
962 }
963 break;
964 }
965
966 gen_obj:
967 case CONSTR:
968 case CONSTR_NOCAF:
969 case WEAK:
970 case PRIM:
971 {
972 StgPtr end;
973
974 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
975 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
976 evacuate((StgClosure **)p);
977 }
978 break;
979 }
980
981 case BCO: {
982 StgBCO *bco = (StgBCO *)p;
983 evacuate((StgClosure **)&bco->instrs);
984 evacuate((StgClosure **)&bco->literals);
985 evacuate((StgClosure **)&bco->ptrs);
986 break;
987 }
988
989 case IND:
990 case BLACKHOLE:
991 evacuate(&((StgInd *)p)->indirectee);
992 break;
993
994 case MUT_VAR_CLEAN:
995 case MUT_VAR_DIRTY: {
996 gct->eager_promotion = false;
997 evacuate(&((StgMutVar *)p)->var);
998 gct->eager_promotion = saved_eager_promotion;
999
1000 if (gct->failed_to_evac) {
1001 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1002 } else {
1003 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1004 }
1005 break;
1006 }
1007
1008 case BLOCKING_QUEUE:
1009 {
1010 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1011
1012 gct->eager_promotion = false;
1013 evacuate(&bq->bh);
1014 evacuate((StgClosure**)&bq->owner);
1015 evacuate((StgClosure**)&bq->queue);
1016 evacuate((StgClosure**)&bq->link);
1017 gct->eager_promotion = saved_eager_promotion;
1018
1019 if (gct->failed_to_evac) {
1020 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1021 } else {
1022 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1023 }
1024 break;
1025 }
1026
1027 case ARR_WORDS:
1028 break;
1029
1030 case THUNK_SELECTOR:
1031 {
1032 StgSelector *s = (StgSelector *)p;
1033 evacuate(&s->selectee);
1034 break;
1035 }
1036
1037 // A chunk of stack saved in a heap object
1038 case AP_STACK:
1039 {
1040 StgAP_STACK *ap = (StgAP_STACK *)p;
1041
1042 evacuate(&ap->fun);
1043 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1044 break;
1045 }
1046
1047 case PAP:
1048 scavenge_PAP((StgPAP *)p);
1049 break;
1050
1051 case AP:
1052 scavenge_AP((StgAP *)p);
1053 break;
1054
1055 case MUT_ARR_PTRS_CLEAN:
1056 case MUT_ARR_PTRS_DIRTY:
1057 // follow everything
1058 {
1059 // We don't eagerly promote objects pointed to by a mutable
1060 // array, but if we find the array only points to objects in
1061 // the same or an older generation, we mark it "clean" and
1062 // avoid traversing it during minor GCs.
1063 gct->eager_promotion = false;
1064
1065 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1066
1067 if (gct->failed_to_evac) {
1068 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1069 } else {
1070 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1071 }
1072
1073 gct->eager_promotion = saved_eager_promotion;
1074 gct->failed_to_evac = true; // mutable anyhow.
1075 break;
1076 }
1077
1078 case MUT_ARR_PTRS_FROZEN_CLEAN:
1079 case MUT_ARR_PTRS_FROZEN_DIRTY:
1080 // follow everything
1081 {
1082 StgPtr q = p;
1083
1084 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1085
1086 if (gct->failed_to_evac) {
1087 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
1088 } else {
1089 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
1090 }
1091 break;
1092 }
1093
1094 case SMALL_MUT_ARR_PTRS_CLEAN:
1095 case SMALL_MUT_ARR_PTRS_DIRTY:
1096 // follow everything
1097 {
1098 StgPtr next;
1099 bool saved_eager;
1100
1101 // We don't eagerly promote objects pointed to by a mutable
1102 // array, but if we find the array only points to objects in
1103 // the same or an older generation, we mark it "clean" and
1104 // avoid traversing it during minor GCs.
1105 saved_eager = gct->eager_promotion;
1106 gct->eager_promotion = false;
1107 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1108 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1109 evacuate((StgClosure **)p);
1110 }
1111 gct->eager_promotion = saved_eager;
1112
1113 if (gct->failed_to_evac) {
1114 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1115 } else {
1116 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1117 }
1118
1119 gct->failed_to_evac = true; // mutable anyhow.
1120 break;
1121 }
1122
1123 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
1124 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
1125 // follow everything
1126 {
1127 StgPtr next, q = p;
1128
1129 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1130 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1131 evacuate((StgClosure **)p);
1132 }
1133
1134 if (gct->failed_to_evac) {
1135 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
1136 } else {
1137 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
1138 }
1139 break;
1140 }
1141
1142 case TSO:
1143 {
1144 scavengeTSO((StgTSO*)p);
1145 break;
1146 }
1147
1148 case STACK:
1149 {
1150 StgStack *stack = (StgStack*)p;
1151
1152 gct->eager_promotion = false;
1153
1154 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1155 stack->dirty = gct->failed_to_evac;
1156
1157 gct->eager_promotion = saved_eager_promotion;
1158 break;
1159 }
1160
1161 case MUT_PRIM:
1162 {
1163 StgPtr end;
1164
1165 gct->eager_promotion = false;
1166
1167 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1168 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1169 evacuate((StgClosure **)p);
1170 }
1171
1172 gct->eager_promotion = saved_eager_promotion;
1173 gct->failed_to_evac = true; // mutable
1174 break;
1175 }
1176
1177 case TREC_CHUNK:
1178 {
1179 StgWord i;
1180 StgTRecChunk *tc = ((StgTRecChunk *) p);
1181 TRecEntry *e = &(tc -> entries[0]);
1182 gct->eager_promotion = false;
1183 evacuate((StgClosure **)&tc->prev_chunk);
1184 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1185 evacuate((StgClosure **)&e->tvar);
1186 evacuate((StgClosure **)&e->expected_value);
1187 evacuate((StgClosure **)&e->new_value);
1188 }
1189 gct->eager_promotion = saved_eager_promotion;
1190 gct->failed_to_evac = true; // mutable
1191 break;
1192 }
1193
1194 default:
1195 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1196 info->type, p);
1197 }
1198
1199 if (gct->failed_to_evac) {
1200 gct->failed_to_evac = false;
1201 if (gct->evac_gen_no) {
1202 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1203 }
1204 }
1205 } // while (p = pop_mark_stack())
1206 }
1207
1208 /* -----------------------------------------------------------------------------
1209 Scavenge one object.
1210
1211 This is used for objects that are temporarily marked as mutable
1212 because they contain old-to-new generation pointers. Only certain
1213 objects can have this property.
1214 -------------------------------------------------------------------------- */
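/* Returns true ("no_luck") if some pointer in the object could not be
   promoted to the desired generation, i.e. the object may still contain
   old-to-young pointers. Callers (scavenge_mutable_list, scavenge_large)
   use this to decide whether the object must stay on the mutable list:

       if (scavenge_one(p)) {
           recordMutableGen_GC((StgClosure *)p, gen_no);
       }
*/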
1215
1216 static bool
1217 scavenge_one(StgPtr p)
1218 {
1219 const StgInfoTable *info;
1220 bool no_luck;
1221 bool saved_eager_promotion;
1222
1223 saved_eager_promotion = gct->eager_promotion;
1224
1225 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1226 info = get_itbl((StgClosure *)p);
1227
1228 switch (info->type) {
1229
1230 case MVAR_CLEAN:
1231 case MVAR_DIRTY:
1232 {
1233 StgMVar *mvar = ((StgMVar *)p);
1234 gct->eager_promotion = false;
1235 evacuate((StgClosure **)&mvar->head);
1236 evacuate((StgClosure **)&mvar->tail);
1237 evacuate((StgClosure **)&mvar->value);
1238 gct->eager_promotion = saved_eager_promotion;
1239
1240 if (gct->failed_to_evac) {
1241 mvar->header.info = &stg_MVAR_DIRTY_info;
1242 } else {
1243 mvar->header.info = &stg_MVAR_CLEAN_info;
1244 }
1245 break;
1246 }
1247
1248 case TVAR:
1249 {
1250 StgTVar *tvar = ((StgTVar *)p);
1251 gct->eager_promotion = false;
1252 evacuate((StgClosure **)&tvar->current_value);
1253 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1254 gct->eager_promotion = saved_eager_promotion;
1255
1256 if (gct->failed_to_evac) {
1257 tvar->header.info = &stg_TVAR_DIRTY_info;
1258 } else {
1259 tvar->header.info = &stg_TVAR_CLEAN_info;
1260 }
1261 break;
1262 }
1263
1264 case THUNK:
1265 case THUNK_1_0:
1266 case THUNK_0_1:
1267 case THUNK_1_1:
1268 case THUNK_0_2:
1269 case THUNK_2_0:
1270 {
1271 StgPtr q, end;
1272
1273 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1274 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1275 evacuate((StgClosure **)q);
1276 }
1277 break;
1278 }
1279
1280 case FUN:
1281 case FUN_1_0: // hardly worth specialising these guys
1282 case FUN_0_1:
1283 case FUN_1_1:
1284 case FUN_0_2:
1285 case FUN_2_0:
1286 case CONSTR:
1287 case CONSTR_NOCAF:
1288 case CONSTR_1_0:
1289 case CONSTR_0_1:
1290 case CONSTR_1_1:
1291 case CONSTR_0_2:
1292 case CONSTR_2_0:
1293 case PRIM:
1294 {
1295 StgPtr q, end;
1296
1297 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1298 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1299 evacuate((StgClosure **)q);
1300 }
1301 break;
1302 }
1303
1304 case WEAK:
1305 // This WEAK object will not be considered by tidyWeakList during this
1306 // collection because it is in a generation > N, but it is on the
1307 // mutable list so we must evacuate all of its pointers because some
1308 // of them may point into a younger generation.
1309 scavengeLiveWeak((StgWeak *)p);
1310 break;
1311
1312 case MUT_VAR_CLEAN:
1313 case MUT_VAR_DIRTY: {
1314 StgPtr q = p;
1315
1316 gct->eager_promotion = false;
1317 evacuate(&((StgMutVar *)p)->var);
1318 gct->eager_promotion = saved_eager_promotion;
1319
1320 if (gct->failed_to_evac) {
1321 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1322 } else {
1323 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1324 }
1325 break;
1326 }
1327
1328 case BLOCKING_QUEUE:
1329 {
1330 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1331
1332 gct->eager_promotion = false;
1333 evacuate(&bq->bh);
1334 evacuate((StgClosure**)&bq->owner);
1335 evacuate((StgClosure**)&bq->queue);
1336 evacuate((StgClosure**)&bq->link);
1337 gct->eager_promotion = saved_eager_promotion;
1338
1339 if (gct->failed_to_evac) {
1340 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1341 } else {
1342 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1343 }
1344 break;
1345 }
1346
1347 case THUNK_SELECTOR:
1348 {
1349 StgSelector *s = (StgSelector *)p;
1350 evacuate(&s->selectee);
1351 break;
1352 }
1353
1354 case AP_STACK:
1355 {
1356 StgAP_STACK *ap = (StgAP_STACK *)p;
1357
1358 evacuate(&ap->fun);
1359 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1360 p = (StgPtr)ap->payload + ap->size;
1361 break;
1362 }
1363
1364 case PAP:
1365 p = scavenge_PAP((StgPAP *)p);
1366 break;
1367
1368 case AP:
1369 p = scavenge_AP((StgAP *)p);
1370 break;
1371
1372 case ARR_WORDS:
1373 // nothing to follow
1374 break;
1375
1376 case MUT_ARR_PTRS_CLEAN:
1377 case MUT_ARR_PTRS_DIRTY:
1378 {
1379 // We don't eagerly promote objects pointed to by a mutable
1380 // array, but if we find the array only points to objects in
1381 // the same or an older generation, we mark it "clean" and
1382 // avoid traversing it during minor GCs.
1383 gct->eager_promotion = false;
1384
1385 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1386
1387 if (gct->failed_to_evac) {
1388 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1389 } else {
1390 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1391 }
1392
1393 gct->eager_promotion = saved_eager_promotion;
1394 gct->failed_to_evac = true;
1395 break;
1396 }
1397
1398 case MUT_ARR_PTRS_FROZEN_CLEAN:
1399 case MUT_ARR_PTRS_FROZEN_DIRTY:
1400 {
1401 // follow everything
1402 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1403
1404 if (gct->failed_to_evac) {
1405 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
1406 } else {
1407 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
1408 }
1409 break;
1410 }
1411
1412 case SMALL_MUT_ARR_PTRS_CLEAN:
1413 case SMALL_MUT_ARR_PTRS_DIRTY:
1414 {
1415 StgPtr next, q;
1416 bool saved_eager;
1417
1418 // We don't eagerly promote objects pointed to by a mutable
1419 // array, but if we find the array only points to objects in
1420 // the same or an older generation, we mark it "clean" and
1421 // avoid traversing it during minor GCs.
1422 saved_eager = gct->eager_promotion;
1423 gct->eager_promotion = false;
1424 q = p;
1425 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1426 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1427 evacuate((StgClosure **)p);
1428 }
1429 gct->eager_promotion = saved_eager;
1430
1431 if (gct->failed_to_evac) {
1432 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1433 } else {
1434 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1435 }
1436
1437 gct->failed_to_evac = true;
1438 break;
1439 }
1440
1441 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
1442 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
1443 {
1444 // follow everything
1445 StgPtr next, q=p;
1446
1447 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1448 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1449 evacuate((StgClosure **)p);
1450 }
1451
1452 if (gct->failed_to_evac) {
1453 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
1454 } else {
1455 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
1456 }
1457 break;
1458 }
1459
1460 case TSO:
1461 {
1462 scavengeTSO((StgTSO*)p);
1463 break;
1464 }
1465
1466 case STACK:
1467 {
1468 StgStack *stack = (StgStack*)p;
1469
1470 gct->eager_promotion = false;
1471
1472 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1473 stack->dirty = gct->failed_to_evac;
1474
1475 gct->eager_promotion = saved_eager_promotion;
1476 break;
1477 }
1478
1479 case MUT_PRIM:
1480 {
1481 StgPtr end;
1482
1483 gct->eager_promotion = false;
1484
1485 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1486 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1487 evacuate((StgClosure **)p);
1488 }
1489
1490 gct->eager_promotion = saved_eager_promotion;
1491 gct->failed_to_evac = true; // mutable
1492 break;
1494 }
1495
1496 case TREC_CHUNK:
1497 {
1498 StgWord i;
1499 StgTRecChunk *tc = ((StgTRecChunk *) p);
1500 TRecEntry *e = &(tc -> entries[0]);
1501 gct->eager_promotion = false;
1502 evacuate((StgClosure **)&tc->prev_chunk);
1503 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1504 evacuate((StgClosure **)&e->tvar);
1505 evacuate((StgClosure **)&e->expected_value);
1506 evacuate((StgClosure **)&e->new_value);
1507 }
1508 gct->eager_promotion = saved_eager_promotion;
1509 gct->failed_to_evac = true; // mutable
1510 break;
1511 }
1512
1513 case IND:
1514 // IND can happen, for example, when the interpreter allocates
1515 // a gigantic AP closure (more than one block), which ends up
1516 // on the large-object list and then gets updated. See #3424.
1517 case BLACKHOLE:
1518 case IND_STATIC:
1519 evacuate(&((StgInd *)p)->indirectee);
1520
1521 #if 0 && defined(DEBUG)
1522 if (RtsFlags.DebugFlags.gc)
1523 /* Debugging code to print out the size of the thing we just
1524 * promoted
1525 */
1526 {
1527 StgPtr start = gen->scan;
1528 bdescr *start_bd = gen->scan_bd;
1529 StgWord size = 0;
1530 scavenge(&gen);
1531 if (start_bd != gen->scan_bd) {
1532 size += (P_)BLOCK_ROUND_UP(start) - start;
1533 start_bd = start_bd->link;
1534 while (start_bd != gen->scan_bd) {
1535 size += BLOCK_SIZE_W;
1536 start_bd = start_bd->link;
1537 }
1538 size += gen->scan -
1539 (P_)BLOCK_ROUND_DOWN(gen->scan);
1540 } else {
1541 size = gen->scan - start;
1542 }
1543 debugBelch("evac IND: %ld bytes", size * sizeof(W_));
1544 }
1545 #endif
1546 break;
1547
1548 case COMPACT_NFDATA:
1549 scavenge_compact((StgCompactNFData*)p);
1550 break;
1551
1552 default:
1553 barf("scavenge_one: strange object %d", (int)(info->type));
1554 }
1555
1556 no_luck = gct->failed_to_evac;
1557 gct->failed_to_evac = false;
1558 return (no_luck);
1559 }
1560
1561 /* -----------------------------------------------------------------------------
1562 Scavenging mutable lists.
1563
1564 We treat the mutable list of each generation > N (i.e. all the
1565 generations older than the one being collected) as roots. We also
1566 remove non-mutable objects from the mutable list at this point.
1567 -------------------------------------------------------------------------- */
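/* A mut_list is a chain of blocks whose payloads are simply arrays of
   StgPtr, each entry pointing at a (potentially) mutable closure in this
   generation. The loop below therefore walks bd->start .. bd->free of each
   block, scavenges the closure each entry points to, and re-records it via
   recordMutableGen_GC() only if it still needs to be watched.
*/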
1568
1569 static void
1570 scavenge_mutable_list(bdescr *bd, generation *gen)
1571 {
1572 StgPtr p, q;
1573 uint32_t gen_no;
1574
1575 gen_no = gen->no;
1576 gct->evac_gen_no = gen_no;
1577 for (; bd != NULL; bd = bd->link) {
1578 for (q = bd->start; q < bd->free; q++) {
1579 p = (StgPtr)*q;
1580 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1581
1582 #if defined(DEBUG)
1583 const StgInfoTable *pinfo;
1584 switch (get_itbl((StgClosure *)p)->type) {
1585 case MUT_VAR_CLEAN:
1586 // can happen due to concurrent writeMutVars
1587 case MUT_VAR_DIRTY:
1588 mutlist_MUTVARS++; break;
1589 case MUT_ARR_PTRS_CLEAN:
1590 case MUT_ARR_PTRS_DIRTY:
1591 case MUT_ARR_PTRS_FROZEN_CLEAN:
1592 case MUT_ARR_PTRS_FROZEN_DIRTY:
1593 mutlist_MUTARRS++; break;
1594 case MVAR_CLEAN:
1595 barf("MVAR_CLEAN on mutable list");
1596 case MVAR_DIRTY:
1597 mutlist_MVARS++; break;
1598 case TVAR:
1599 mutlist_TVAR++; break;
1600 case TREC_CHUNK:
1601 mutlist_TREC_CHUNK++; break;
1602 case MUT_PRIM:
1603 pinfo = ((StgClosure*)p)->header.info;
1604 if (pinfo == &stg_TVAR_WATCH_QUEUE_info)
1605 mutlist_TVAR_WATCH_QUEUE++;
1606 else if (pinfo == &stg_TREC_HEADER_info)
1607 mutlist_TREC_HEADER++;
1608 else
1609 mutlist_OTHERS++;
1610 break;
1611 default:
1612 mutlist_OTHERS++; break;
1613 }
1614 #endif
1615
1616 // Check whether this object is "clean", that is, it
1617 // definitely doesn't point into a young generation.
1618 // Clean objects don't need to be scavenged. Some clean
1619 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1620 // list at all; others, such as MUT_ARR_PTRS,
1621 // are always on the mutable list.
1622 //
1623 switch (get_itbl((StgClosure *)p)->type) {
1624 case MUT_ARR_PTRS_CLEAN:
1625 case SMALL_MUT_ARR_PTRS_CLEAN:
1626 recordMutableGen_GC((StgClosure *)p,gen_no);
1627 continue;
1628 case MUT_ARR_PTRS_DIRTY:
1629 {
1630 bool saved_eager_promotion;
1631 saved_eager_promotion = gct->eager_promotion;
1632 gct->eager_promotion = false;
1633
1634 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1635
1636 if (gct->failed_to_evac) {
1637 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1638 } else {
1639 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1640 }
1641
1642 gct->eager_promotion = saved_eager_promotion;
1643 gct->failed_to_evac = false;
1644 recordMutableGen_GC((StgClosure *)p,gen_no);
1645 continue;
1646 }
1647 default:
1648 ;
1649 }
1650
1651 if (scavenge_one(p)) {
1652 // didn't manage to promote everything, so put the
1653 // object back on the list.
1654 recordMutableGen_GC((StgClosure *)p,gen_no);
1655 }
1656 }
1657 }
1658 }
1659
1660 void
1661 scavenge_capability_mut_lists (Capability *cap)
1662 {
1663 uint32_t g;
1664
1665 /* Mutable lists from each generation > N
1666 * we want to *scavenge* these roots, not evacuate them: they're not
1667 * going to move in this GC.
1668 * Also do them in reverse generation order, for the usual reason:
1669 * namely to reduce the likelihood of spurious old->new pointers.
1670 */
1671 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1672 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1673 freeChain_sync(cap->saved_mut_lists[g]);
1674 cap->saved_mut_lists[g] = NULL;
1675 }
1676 }
1677
1678 /* -----------------------------------------------------------------------------
1679 Scavenging the static objects.
1680
1681 Static objects reachable from the heap are linked onto gct->static_objects
1682 during evacuation. Here we evacuate the pointers each one contains (always
1683 into the oldest generation) and move it onto gct->scavenged_static_objects.
1684 -------------------------------------------------------------------------- */
1685
1686 static void
1687 scavenge_static(void)
1688 {
1689 StgClosure *flagged_p, *p;
1690 const StgInfoTable *info;
1691
1692 debugTrace(DEBUG_gc, "scavenging static objects");
1693
1694 /* Always evacuate straight to the oldest generation for static
1695 * objects */
1696 gct->evac_gen_no = oldest_gen->no;
1697
1698 /* keep going until we've scavenged all the objects on the linked
1699 list... */
1700
1701 while (1) {
1702
1703 /* get the next static object from the list. Remember, there might
1704 * be more stuff on this list after each evacuation...
1705 * (static_objects is a global)
1706 */
1707 flagged_p = gct->static_objects;
1708 if (flagged_p == END_OF_STATIC_OBJECT_LIST) {
1709 break;
1710 }
1711 p = UNTAG_STATIC_LIST_PTR(flagged_p);
1712
1713 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1714 info = get_itbl(p);
1715 // make sure the info pointer is into text space
1716
1717 /* Take this object *off* the static_objects list,
1718 * and put it on the scavenged_static_objects list.
1719 */
1720 gct->static_objects = *STATIC_LINK(info,p);
1721 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1722 gct->scavenged_static_objects = flagged_p;
1723
1724 switch (info -> type) {
1725
1726 case IND_STATIC:
1727 {
1728 StgInd *ind = (StgInd *)p;
1729 evacuate(&ind->indirectee);
1730
1731 /* might fail to evacuate it, in which case we have to pop it
1732 * back on the mutable list of the oldest generation. We
1733 * leave it *on* the scavenged_static_objects list, though,
1734 * in case we visit this object again.
1735 */
1736 if (gct->failed_to_evac) {
1737 gct->failed_to_evac = false;
1738 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1739 }
1740 break;
1741 }
1742
1743 case THUNK_STATIC:
1744 scavenge_thunk_srt(info);
1745 break;
1746
1747 case FUN_STATIC:
1748 scavenge_fun_srt(info);
1749 FALLTHROUGH;
1750
1751 // a FUN_STATIC can also be an SRT, so it may have pointer
1752 // fields. See Note [SRTs] in CmmBuildInfoTables, specifically
1753 // the [FUN] optimisation.
1754
1755 case CONSTR:
1756 case CONSTR_NOCAF:
1757 case CONSTR_1_0:
1758 case CONSTR_0_1:
1759 case CONSTR_2_0:
1760 case CONSTR_1_1:
1761 case CONSTR_0_2:
1762 {
1763 StgPtr q, next;
1764
1765 next = (P_)p->payload + info->layout.payload.ptrs;
1766 // evacuate the pointers
1767 for (q = (P_)p->payload; q < next; q++) {
1768 evacuate((StgClosure **)q);
1769 }
1770 break;
1771 }
1772
1773 default:
1774 barf("scavenge_static: strange closure %d", (int)(info->type));
1775 }
1776
1777 ASSERT(gct->failed_to_evac == false);
1778 }
1779 }
1780
1781 /* -----------------------------------------------------------------------------
1782 scavenge a chunk of memory described by a bitmap
1783 -------------------------------------------------------------------------- */
1784
1785 static void
1786 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
1787 {
1788 walk_large_bitmap(do_evacuate, (StgClosure **) p, large_bitmap, size, NULL);
1789 }
1790
1791
1792 /* -----------------------------------------------------------------------------
1793 scavenge_stack walks over a section of stack and evacuates all the
1794 objects pointed to by it. We can use the same code for walking
1795 AP_STACK_UPDs, since these are just sections of copied stack.
1796 -------------------------------------------------------------------------- */
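/* In outline, each iteration of the loop below looks at one stack frame and
   dispatches on its return info table:

     - UPDATE_FRAME:  evacuate the updatee specially (see Note [upd-black-hole])
     - RET_SMALL etc: walk the frame's small bitmap, then its SRT
     - RET_BCO:       use the interpreter BCO's own bitmap
     - RET_BIG:       walk an StgLargeBitmap, then the SRT
     - RET_FUN:       evacuate the function and use its argument bitmap

   advancing p past the frame each time until stack_end is reached.
*/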
1797
1798 static void
1799 scavenge_stack(StgPtr p, StgPtr stack_end)
1800 {
1801 const StgRetInfoTable* info;
1802 StgWord bitmap;
1803 StgWord size;
1804
1805 /*
1806 * Each time around this loop, we are looking at a chunk of stack
1807 * that starts with an activation record.
1808 */
1809
1810 while (p < stack_end) {
1811 info = get_ret_itbl((StgClosure *)p);
1812
1813 switch (info->i.type) {
1814
1815 case UPDATE_FRAME:
1816 // Note [upd-black-hole]
1817 //
1818 // In SMP, we can get update frames that point to indirections
1819 // when two threads evaluate the same thunk. We do attempt to
1820 // discover this situation in threadPaused(), but it's
1821 // possible that the following sequence occurs:
1822 //
1823 // A B
1824 // enter T
1825 // enter T
1826 // blackhole T
1827 // update T
1828 // GC
1829 //
1830 // Now T is an indirection, and the update frame is already
1831 // marked on A's stack, so we won't traverse it again in
1832 // threadPaused(). We could traverse the whole stack again
1833 // before GC, but that would be too expensive.
1834 //
1835 // Scavenging this update frame as normal would be disastrous;
1836 // the indirection will be shorted out, and the updatee would
1837 // end up pointing to the value. The update code will then
1838 // overwrite the value, instead of the BLACKHOLE it is
1839 // expecting to write to.
1840 //
1841 // One way we could try to fix this is to detect when the
1842 // BLACKHOLE has been updated by another thread, and then
1843 // replace this update frame with a special frame that just
1844 // enters the value. But this introduces some other
1845 // complexities:
1846 //
1847 // - we must be careful to call checkBlockingQueues() in this
1848 // special frame, because we might otherwise miss wakeups
1849 // for threads that blocked on the original BLACKHOLE,
1850 // - we must spot this frame when we're stripping the stack in
1851 // raiseAsync() and raiseExceptionHelper(), and arrange to call
1852 // checkBlockingQueues() there too.
1853 //
1854 // This is hard to get right, indeed we previously got it
1855 // wrong (see #13751). So we now take a different approach:
1856 // always copy the BLACKHOLE, even if it is actually an
1857 // indirection. This way we keep the update frame, we're
1858 // guaranteed to still perform the update, and check for
1859 // missed wakeups even when stripping the stack in
1860 // raiseAsync() and raiseExceptionHelper(). This is also a
1861 // little more efficient, because evacuating a known BLACKHOLE
1862 // is faster than evacuating an unknown closure.
1863 //
1864 // NOTE: for the reasons above, blackholing (either lazy or
1865 // eager) is NOT optional. See also Note [avoiding
1866 // threadPaused] in Interpreter.c.
1867 //
1868 // There are a couple of alternative solutions:
1869 // - if we see an update frame that points to an indirection,
1870 // arrange to call checkBlockingQueues() on that thread
1871 // after GC.
1872 // - spot a BLOCKING_QUEUE that points to a value and
1873 // arrange to wake it up after the GC.
1874 //
1875 // These are more difficult to implement, requiring an extra
1876 // list to be maintained during GC. They also rely on more
1877 // subtle invariants than the solution implemented here.
1878 //
1879
1880 {
1881 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1882
1883 evacuate_BLACKHOLE(&frame->updatee);
1884 p += sizeofW(StgUpdateFrame);
1885 continue;
1886 }
1887
1888 // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
1889 case CATCH_STM_FRAME:
1890 case CATCH_RETRY_FRAME:
1891 case ATOMICALLY_FRAME:
1892 case UNDERFLOW_FRAME:
1893 case STOP_FRAME:
1894 case CATCH_FRAME:
1895 case RET_SMALL:
1896 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1897 size = BITMAP_SIZE(info->i.layout.bitmap);
1898 // NOTE: the payload starts immediately after the info-ptr, we
1899 // don't have an StgHeader in the same sense as a heap closure.
1900 p++;
1901 p = scavenge_small_bitmap(p, size, bitmap);
1902
1903 follow_srt:
1904 if (major_gc && info->i.srt) {
1905 StgClosure *srt = (StgClosure*)GET_SRT(info);
1906 evacuate(&srt);
1907 }
1908 continue;
1909
1910 case RET_BCO: {
1911 StgBCO *bco;
1912 StgWord size;
1913
1914 p++;
1915 evacuate((StgClosure **)p);
1916 bco = (StgBCO *)*p;
1917 p++;
1918 size = BCO_BITMAP_SIZE(bco);
1919 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1920 p += size;
1921 continue;
1922 }
1923
1924 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1925 case RET_BIG:
1926 {
1927 StgWord size;
1928
1929 size = GET_LARGE_BITMAP(&info->i)->size;
1930 p++;
1931 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1932 p += size;
1933 // and don't forget to follow the SRT
1934 goto follow_srt;
1935 }
1936
1937 case RET_FUN:
1938 {
1939 StgRetFun *ret_fun = (StgRetFun *)p;
1940 const StgFunInfoTable *fun_info;
1941
1942 evacuate(&ret_fun->fun);
1943 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1944 p = scavenge_arg_block(fun_info, ret_fun->payload);
1945 goto follow_srt;
1946 }
1947
1948 default:
1949 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1950 }
1951 }
1952 }
1953
1954 /*-----------------------------------------------------------------------------
1955 scavenge the large object list.
1956
1957 evac_gen_no is set by the caller; similar games are played with
1958 evac_gen_no as with scavenge_block() - see the comment at the top of
1959 scavenge_block(). Most large objects are (repeatedly) mutable, so most
1960 of the time evac_gen_no will be zero.
1961 --------------------------------------------------------------------------- */
1962
1963 static void
1964 scavenge_large (gen_workspace *ws)
1965 {
1966 bdescr *bd;
1967 StgPtr p;
1968
1969 gct->evac_gen_no = ws->gen->no;
1970
1971 bd = ws->todo_large_objects;
1972
1973 for (; bd != NULL; bd = ws->todo_large_objects) {
1974
1975 // take this object *off* the large objects list and put it on
1976 // the scavenged large objects list. This is so that we can
1977 // treat todo_large_objects as a stack and push new objects on
1978 // the front when evacuating.
1979 ws->todo_large_objects = bd->link;
1980
1981 ACQUIRE_SPIN_LOCK(&ws->gen->sync);
1982 if (bd->flags & BF_COMPACT) {
1983 dbl_link_onto(bd, &ws->gen->live_compact_objects);
1984 StgCompactNFData *str = ((StgCompactNFDataBlock*)bd->start)->owner;
1985 ws->gen->n_live_compact_blocks += str->totalW / BLOCK_SIZE_W;
1986 p = (StgPtr)str;
1987 } else {
1988 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
1989 ws->gen->n_scavenged_large_blocks += bd->blocks;
1990 p = bd->start;
1991 }
1992 RELEASE_SPIN_LOCK(&ws->gen->sync);
1993
1994 if (scavenge_one(p)) {
1995 if (ws->gen->no > 0) {
1996 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
1997 }
1998 }
1999
2000 // stats
2001 gct->scanned += closure_sizeW((StgClosure*)p);
2002 }
2003 }
2004
2005 /* ----------------------------------------------------------------------------
2006 Look for work to do.
2007
2008 We look for the oldest gen that has either a todo block that can
2009 be scanned, or a block of work on the global queue that we can
2010 scan.
2011
2012 It is important to take work from the *oldest* generation that
2013 has work available, because that minimizes the likelihood of
2014 evacuating objects into a young generation when they should have
2015 been eagerly promoted. This really does make a difference (the
2016 cacheprof benchmark is one that is affected).
2017
2018 We also want to scan the todo block if possible before grabbing
2019 work from the global queue, the reason being that we don't want to
2020 steal work from the global queue and starve other threads if there
2021 is other work we can usefully be doing.
2022 ------------------------------------------------------------------------- */
2023
2024 static bool
2025 scavenge_find_work (void)
2026 {
2027 int g;
2028 gen_workspace *ws;
2029 bool did_something, did_anything;
2030 bdescr *bd;
2031
2032 gct->scav_find_work++;
2033
2034 did_anything = false;
2035
2036 loop:
2037 did_something = false;
2038 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2039 ws = &gct->gens[g];
2040
2041 gct->scan_bd = NULL;
2042
2043 // If we have a scan block with some work to do,
2044 // scavenge everything up to the free pointer.
2045 if (ws->todo_bd->u.scan < ws->todo_free)
2046 {
2047 scavenge_block(ws->todo_bd);
2048 did_something = true;
2049 break;
2050 }
2051
2052 // If we have any large objects to scavenge, do them now.
2053 if (ws->todo_large_objects) {
2054 scavenge_large(ws);
2055 did_something = true;
2056 break;
2057 }
2058
2059 if ((bd = grab_local_todo_block(ws)) != NULL) {
2060 scavenge_block(bd);
2061 did_something = true;
2062 break;
2063 }
2064 }
2065
2066 if (did_something) {
2067 did_anything = true;
2068 goto loop;
2069 }
2070
2071 #if defined(THREADED_RTS)
2072 if (work_stealing) {
2073 // look for work to steal
2074 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2075 if ((bd = steal_todo_block(g)) != NULL) {
2076 scavenge_block(bd);
2077 did_something = true;
2078 break;
2079 }
2080 }
2081
2082 if (did_something) {
2083 did_anything = true;
2084 goto loop;
2085 }
2086 }
2087 #endif
2088
2089 // only return when there is no more work to do
2090
2091 return did_anything;
2092 }
2093
2094 /* ----------------------------------------------------------------------------
2095 Scavenge until we can't find anything more to scavenge.
2096 ------------------------------------------------------------------------- */
2097
2098 void
2099 scavenge_loop(void)
2100 {
2101 bool work_to_do;
2102
2103 loop:
2104 work_to_do = false;
2105
2106 // scavenge static objects
2107 if (major_gc && gct->static_objects != END_OF_STATIC_OBJECT_LIST) {
2108 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
2109 scavenge_static();
2110 }
2111
2112 // scavenge objects in compacted generation
2113 if (mark_stack_bd != NULL && !mark_stack_empty()) {
2114 scavenge_mark_stack();
2115 work_to_do = true;
2116 }
2117
2118 // Order is important here: we want to deal in full blocks as
2119 // much as possible, so go for global work in preference to
2120 // local work. Only if all the global work has been exhausted
2121 // do we start scavenging the fragments of blocks in the local
2122 // workspaces.
2123 if (scavenge_find_work()) goto loop;
2124
2125 if (work_to_do) goto loop;
2126 }