rts/sm/Scav.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * https://gitlab.haskell.org/ghc/ghc/wikis/commentary/rts/storage/gc
11 *
12 * ---------------------------------------------------------------------------*/
13
14 /* ----------------------------------------------------------------------------
15 We have two main scavenge functions:
16
17 - scavenge_block(bdescr *bd)
18 - scavenge_one(StgPtr p)
19
20 As the names and parameters suggest, the first one scavenges a whole block
21 while the second one scavenges only one object. This, however, is not the
22 only difference: scavenge_block scavenges all SRTs, while scavenge_one only
23 scavenges the SRTs of stacks. The reason is that scavenge_one is called in
24 two cases:
25
26 - When scavenging a mut_list
27 - When scavenging a large object
28
29 We don't have to scavenge SRTs when scavenging a mut_list, because we only
30 scavenge mut_lists in minor GCs, and static objects are only collected in
31 major GCs.
32
33 However, because scavenge_one is also used to scavenge large objects (which
34 are scavenged even in major GCs), we need to deal with SRTs of large
35 objects. We never allocate large FUNs and THUNKs, but we allocate large
36 STACKs (e.g. in threadStackOverflow), and stack frames can have SRTs. So
37 scavenge_one skips FUN and THUNK SRTs but scavenges stack frame SRTs.
38
39 In summary, in a major GC:
40
41 - scavenge_block() scavenges all SRTs
42 - scavenge_one() scavenges only stack frame SRTs
43 ------------------------------------------------------------------------- */
44
45 #include "PosixSource.h"
46 #include "Rts.h"
47
48 #include "Storage.h"
49 #include "GC.h"
50 #include "GCThread.h"
51 #include "GCUtils.h"
52 #include "Compact.h"
53 #include "MarkStack.h"
54 #include "Evac.h"
55 #include "Scav.h"
56 #include "Apply.h"
57 #include "Trace.h"
58 #include "Sanity.h"
59 #include "Capability.h"
60 #include "LdvProfile.h"
61 #include "HeapUtils.h"
62 #include "Hash.h"
63
64 #include "sm/MarkWeak.h"
65 #include "sm/NonMoving.h" // for nonmoving_set_closure_mark_bit
66 #include "sm/NonMovingScav.h"
67
68 static void scavenge_large_bitmap (StgPtr p,
69 StgLargeBitmap *large_bitmap,
70 StgWord size );
71
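/* In a threaded RTS built without the parallel GC, map these entry points
   onto their single-threaded ("1"-suffixed) variants, so that the code below
   is written once against a single set of names. */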
72 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
73 # define evacuate(a) evacuate1(a)
74 # define evacuate_BLACKHOLE(a) evacuate_BLACKHOLE1(a)
75 # define scavenge_loop(a) scavenge_loop1(a)
76 # define scavenge_block(a) scavenge_block1(a)
77 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
78 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
79 # define scavengeTSO(tso) scavengeTSO1(tso)
80 # define scavenge_stack(p, stack_end) scavenge_stack1(p, stack_end)
81 # define scavenge_fun_srt(info) scavenge_fun_srt1(info)
83 # define scavenge_thunk_srt(info) scavenge_thunk_srt1(info)
84 # define scavenge_mut_arr_ptrs(info) scavenge_mut_arr_ptrs1(info)
85 # define scavenge_PAP(pap) scavenge_PAP1(pap)
86 # define scavenge_AP(ap) scavenge_AP1(ap)
87 # define scavenge_compact(str) scavenge_compact1(str)
88 #endif
89
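// Adapter so that evacuate() can be used as a walk_large_bitmap() callback
// (see scavenge_large_bitmap below).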
90 static void do_evacuate(StgClosure **p, void *user STG_UNUSED)
91 {
92 evacuate(p);
93 }
94
95 /* -----------------------------------------------------------------------------
96 Scavenge a TSO.
97 -------------------------------------------------------------------------- */
98
99 void
100 scavengeTSO (StgTSO *tso)
101 {
102 bool saved_eager;
103
104 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
105
106 // update the pointer from the InCall.
107 if (tso->bound != NULL) {
108 // NB. We can't just set tso->bound->tso = tso, because this
109 // might be an invalid copy of the TSO resulting from multiple
110 // threads evacuating the TSO simultaneously (see
111 // Evac.c:copy_tag()). Calling evacuate() on this pointer
112 // will ensure that we update it to point to the correct copy.
113 evacuate((StgClosure **)&tso->bound->tso);
114 }
115
116 saved_eager = gct->eager_promotion;
117 gct->eager_promotion = false;
118
119 evacuate((StgClosure **)&tso->blocked_exceptions);
120 evacuate((StgClosure **)&tso->bq);
121
122 // scavenge the current transaction record
123 evacuate((StgClosure **)&tso->trec);
124
125 evacuate((StgClosure **)&tso->stackobj);
126
127 evacuate((StgClosure **)&tso->_link);
128 if ( tso->why_blocked == BlockedOnMVar
129 || tso->why_blocked == BlockedOnMVarRead
130 || tso->why_blocked == BlockedOnBlackHole
131 || tso->why_blocked == BlockedOnMsgThrowTo
132 || tso->why_blocked == NotBlocked
133 ) {
134 evacuate(&tso->block_info.closure);
135 }
136 #if defined(THREADED_RTS)
137 // in the THREADED_RTS, block_info.closure must always point to a
138 // valid closure, because we assume this in throwTo(). In the
139 // non-threaded RTS it might be a FD (for
140 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
141 else {
142 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
143 }
144 #endif
145
146 tso->dirty = gct->failed_to_evac;
147
148 gct->eager_promotion = saved_eager;
149 }
150
151 /* ----------------------------------------------------------------------------
152 Scavenging compact objects
153 ------------------------------------------------------------------------- */
154
155 typedef struct {
156 // We must save gct when calling mapHashTable(), which is compiled
157 // without GCThread.h and so uses a different calling convention.
158 // See also GC.c:mark_root where we do a similar thing.
159 gc_thread *saved_gct;
160 HashTable *newHash;
161 } MapHashData;
162
163 static void
164 evacuate_hash_entry(MapHashData *dat, StgWord key, const void *value)
165 {
166 StgClosure *p = (StgClosure*)key;
167 #if defined(THREADED_RTS)
168 gc_thread *old_gct = gct;
169 #endif
170
171 SET_GCT(dat->saved_gct);
172 evacuate(&p);
173 insertHashTable(dat->newHash, (StgWord)p, value);
174 SET_GCT(old_gct);
175 }
176
177 /* Here we scavenge the sharing-preservation hash-table, which may contain keys
178 * living in from-space.
179 */
180 void
181 scavenge_compact(StgCompactNFData *str)
182 {
183 bool saved_eager;
184 saved_eager = gct->eager_promotion;
185 gct->eager_promotion = false;
186
187 if (str->hash) {
188 MapHashData dat;
189 dat.saved_gct = gct;
190 HashTable *newHash = allocHashTable();
191 dat.newHash = newHash;
192 mapHashTable(str->hash, (void*)&dat, (MapHashFn)evacuate_hash_entry);
193 freeHashTable(str->hash, NULL);
194 str->hash = newHash;
195 }
196
197 debugTrace(DEBUG_compact,
198 "compact alive @%p, gen %d, %" FMT_Word " bytes",
199 str, Bdescr((P_)str)->gen_no, str->totalW * sizeof(W_));
200
201 gct->eager_promotion = saved_eager;
202 if (gct->failed_to_evac) {
203 ((StgClosure *)str)->header.info = &stg_COMPACT_NFDATA_DIRTY_info;
204 } else {
205 ((StgClosure *)str)->header.info = &stg_COMPACT_NFDATA_CLEAN_info;
206 }
207 }
208
209 /* -----------------------------------------------------------------------------
210 Mutable arrays of pointers
211 -------------------------------------------------------------------------- */
212
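// Scavenge the array one card (2^MUT_ARR_PTRS_CARD_BITS elements) at a time,
// then the partial final card. A card's mark is set to 1 if something in it
// failed to evacuate (i.e. the card may still point into a younger
// generation) and cleared to 0 otherwise, so that minor GCs can scavenge only
// the marked cards (see scavenge_mut_arr_ptrs_marked below).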
213 StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
214 {
215 W_ m;
216 bool any_failed;
217 StgPtr p, q;
218
219 any_failed = false;
220 p = (StgPtr)&a->payload[0];
221 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
222 {
223 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
224 for (; p < q; p++) {
225 evacuate((StgClosure**)p);
226 }
227 if (gct->failed_to_evac) {
228 any_failed = true;
229 *mutArrPtrsCard(a,m) = 1;
230 gct->failed_to_evac = false;
231 } else {
232 *mutArrPtrsCard(a,m) = 0;
233 }
234 }
235
236 q = (StgPtr)&a->payload[a->ptrs];
237 if (p < q) {
238 for (; p < q; p++) {
239 evacuate((StgClosure**)p);
240 }
241 if (gct->failed_to_evac) {
242 any_failed = true;
243 *mutArrPtrsCard(a,m) = 1;
244 gct->failed_to_evac = false;
245 } else {
246 *mutArrPtrsCard(a,m) = 0;
247 }
248 }
249
250 gct->failed_to_evac = any_failed;
251 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
252 }
253
254 // scavenge only the marked areas of a MUT_ARR_PTRS
255 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
256 {
257 W_ m;
258 StgPtr p, q;
259 bool any_failed;
260
261 any_failed = false;
262 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
263 {
264 if (*mutArrPtrsCard(a,m) != 0) {
265 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
266 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
267 (StgPtr)&a->payload[a->ptrs]);
268 for (; p < q; p++) {
269 evacuate((StgClosure**)p);
270 }
271 if (gct->failed_to_evac) {
272 any_failed = true;
273 gct->failed_to_evac = false;
274 } else {
275 *mutArrPtrsCard(a,m) = 0;
276 }
277 }
278 }
279
280 gct->failed_to_evac = any_failed;
281 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
282 }
283
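// Walk `size` words starting at p, evacuating each word whose bitmap bit is
// 0; a set bit marks a non-pointer word, which is skipped.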
284 STATIC_INLINE StgPtr
285 scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
286 {
287 while (size > 0) {
288 if ((bitmap & 1) == 0) {
289 evacuate((StgClosure **)p);
290 }
291 p++;
292 bitmap = bitmap >> 1;
293 size--;
294 }
295 return p;
296 }
297
298 /* -----------------------------------------------------------------------------
299 Blocks of function args occur on the stack (at the top) and
300 in PAPs.
301 -------------------------------------------------------------------------- */
302
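// The layout of an argument block is described by the function's info table:
// ARG_GEN carries a small bitmap inline, ARG_GEN_BIG an out-of-line large
// bitmap, and the remaining cases use one of the standard argument patterns
// in stg_arg_bitmaps.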
303 STATIC_INLINE StgPtr
304 scavenge_arg_block (const StgFunInfoTable *fun_info, StgClosure **args)
305 {
306 StgPtr p;
307 StgWord bitmap;
308 StgWord size;
309
310 p = (StgPtr)args;
311 switch (fun_info->f.fun_type) {
312 case ARG_GEN:
313 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
314 size = BITMAP_SIZE(fun_info->f.b.bitmap);
315 goto small_bitmap;
316 case ARG_GEN_BIG:
317 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
318 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
319 p += size;
320 break;
321 default:
322 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
323 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
324 small_bitmap:
325 p = scavenge_small_bitmap(p, size, bitmap);
326 break;
327 }
328 return p;
329 }
330
331 STATIC_INLINE GNUC_ATTR_HOT StgPtr
332 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
333 {
334 StgPtr p;
335 StgWord bitmap;
336 const StgFunInfoTable *fun_info;
337
338 fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(fun));
339 ASSERT(fun_info->i.type != PAP);
340 p = (StgPtr)payload;
341
342 switch (fun_info->f.fun_type) {
343 case ARG_GEN:
344 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
345 goto small_bitmap;
346 case ARG_GEN_BIG:
347 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
348 p += size;
349 break;
350 case ARG_BCO:
351 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
352 p += size;
353 break;
354 default:
355 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
356 small_bitmap:
357 p = scavenge_small_bitmap(p, size, bitmap);
358 break;
359 }
360 return p;
361 }
362
363 GNUC_ATTR_HOT StgPtr
364 scavenge_PAP (StgPAP *pap)
365 {
366 evacuate(&pap->fun);
367 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
368 }
369
370 StgPtr
371 scavenge_AP (StgAP *ap)
372 {
373 evacuate(&ap->fun);
374 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
375 }
376
377 /* -----------------------------------------------------------------------------
378 Scavenge SRTs
379 -------------------------------------------------------------------------- */
380
381 GNUC_ATTR_HOT void
382 scavenge_thunk_srt(const StgInfoTable *info)
383 {
384 StgThunkInfoTable *thunk_info;
385
386 if (!major_gc) return;
387
388 thunk_info = itbl_to_thunk_itbl(info);
389 if (thunk_info->i.srt) {
390 StgClosure *srt = (StgClosure*)GET_SRT(thunk_info);
391 evacuate(&srt);
392 }
393 }
394
395 GNUC_ATTR_HOT void
396 scavenge_fun_srt(const StgInfoTable *info)
397 {
398 StgFunInfoTable *fun_info;
399
400 if (!major_gc) return;
401
402 fun_info = itbl_to_fun_itbl(info);
403 if (fun_info->i.srt) {
404 StgClosure *srt = (StgClosure*)GET_FUN_SRT(fun_info);
405 evacuate(&srt);
406 }
407 }
408
409 /* -----------------------------------------------------------------------------
410 Scavenge a block from the given scan pointer up to bd->free.
411
412 evac_gen_no is set by the caller to be either zero (for a step in a
413 generation < N) or G where G is the generation of the step being
414 scavenged.
415
416 We sometimes temporarily change evac_gen_no back to zero if we're
417 scavenging a mutable object where eager promotion isn't such a good
418 idea.
419 -------------------------------------------------------------------------- */
420
421 static GNUC_ATTR_HOT void
422 scavenge_block (bdescr *bd)
423 {
424 StgPtr p, q;
425 const StgInfoTable *info;
426 bool saved_eager_promotion;
427 gen_workspace *ws;
428
429 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
430 bd->start, bd->gen_no, bd->u.scan);
431
432 gct->scan_bd = bd;
433 gct->evac_gen_no = bd->gen_no;
434 saved_eager_promotion = gct->eager_promotion;
435 gct->failed_to_evac = false;
436
437 ws = &gct->gens[bd->gen->no];
438
439 p = bd->u.scan;
440
441 // we might be evacuating into the very object that we're
442 // scavenging, so we have to check the real bd->free pointer each
443 // time around the loop.
444 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
445
446 ASSERT(bd->link == NULL);
447 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
448 info = get_itbl((StgClosure *)p);
449
450 ASSERT(gct->thunk_selector_depth == 0);
451
452 q = p;
453 switch (info->type) {
454
455 case MVAR_CLEAN:
456 case MVAR_DIRTY:
457 {
458 StgMVar *mvar = ((StgMVar *)p);
459 gct->eager_promotion = false;
460 evacuate((StgClosure **)&mvar->head);
461 evacuate((StgClosure **)&mvar->tail);
462 evacuate((StgClosure **)&mvar->value);
463 gct->eager_promotion = saved_eager_promotion;
464
465 if (gct->failed_to_evac) {
466 mvar->header.info = &stg_MVAR_DIRTY_info;
467 } else {
468 mvar->header.info = &stg_MVAR_CLEAN_info;
469 }
470 p += sizeofW(StgMVar);
471 break;
472 }
473
474 case TVAR:
475 {
476 StgTVar *tvar = ((StgTVar *)p);
477 gct->eager_promotion = false;
478 evacuate((StgClosure **)&tvar->current_value);
479 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
480 gct->eager_promotion = saved_eager_promotion;
481
482 if (gct->failed_to_evac) {
483 tvar->header.info = &stg_TVAR_DIRTY_info;
484 } else {
485 tvar->header.info = &stg_TVAR_CLEAN_info;
486 }
487 p += sizeofW(StgTVar);
488 break;
489 }
490
491 case FUN_2_0:
492 scavenge_fun_srt(info);
493 evacuate(&((StgClosure *)p)->payload[1]);
494 evacuate(&((StgClosure *)p)->payload[0]);
495 p += sizeofW(StgHeader) + 2;
496 break;
497
498 case THUNK_2_0:
499 scavenge_thunk_srt(info);
500 evacuate(&((StgThunk *)p)->payload[1]);
501 evacuate(&((StgThunk *)p)->payload[0]);
502 p += sizeofW(StgThunk) + 2;
503 break;
504
505 case CONSTR_2_0:
506 evacuate(&((StgClosure *)p)->payload[1]);
507 evacuate(&((StgClosure *)p)->payload[0]);
508 p += sizeofW(StgHeader) + 2;
509 break;
510
511 case THUNK_1_0:
512 scavenge_thunk_srt(info);
513 evacuate(&((StgThunk *)p)->payload[0]);
514 p += sizeofW(StgThunk) + 1;
515 break;
516
517 case FUN_1_0:
518 scavenge_fun_srt(info);
519 FALLTHROUGH;
520 case CONSTR_1_0:
521 evacuate(&((StgClosure *)p)->payload[0]);
522 p += sizeofW(StgHeader) + 1;
523 break;
524
525 case THUNK_0_1:
526 scavenge_thunk_srt(info);
527 p += sizeofW(StgThunk) + 1;
528 break;
529
530 case FUN_0_1:
531 scavenge_fun_srt(info);
532 FALLTHROUGH;
533 case CONSTR_0_1:
534 p += sizeofW(StgHeader) + 1;
535 break;
536
537 case THUNK_0_2:
538 scavenge_thunk_srt(info);
539 p += sizeofW(StgThunk) + 2;
540 break;
541
542 case FUN_0_2:
543 scavenge_fun_srt(info);
544 FALLTHROUGH;
545 case CONSTR_0_2:
546 p += sizeofW(StgHeader) + 2;
547 break;
548
549 case THUNK_1_1:
550 scavenge_thunk_srt(info);
551 evacuate(&((StgThunk *)p)->payload[0]);
552 p += sizeofW(StgThunk) + 2;
553 break;
554
555 case FUN_1_1:
556 scavenge_fun_srt(info);
557 FALLTHROUGH;
558 case CONSTR_1_1:
559 evacuate(&((StgClosure *)p)->payload[0]);
560 p += sizeofW(StgHeader) + 2;
561 break;
562
563 case FUN:
564 scavenge_fun_srt(info);
565 goto gen_obj;
566
567 case THUNK:
568 {
569 StgPtr end;
570
571 scavenge_thunk_srt(info);
572 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
573 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
574 evacuate((StgClosure **)p);
575 }
576 p += info->layout.payload.nptrs;
577 break;
578 }
579
580 gen_obj:
581 case CONSTR:
582 case CONSTR_NOCAF:
583 case WEAK:
584 case PRIM:
585 {
586 StgPtr end;
587
588 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
589 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
590 evacuate((StgClosure **)p);
591 }
592 p += info->layout.payload.nptrs;
593 break;
594 }
595
596 case BCO: {
597 StgBCO *bco = (StgBCO *)p;
598 evacuate((StgClosure **)&bco->instrs);
599 evacuate((StgClosure **)&bco->literals);
600 evacuate((StgClosure **)&bco->ptrs);
601 p += bco_sizeW(bco);
602 break;
603 }
604
605 case BLACKHOLE:
606 evacuate(&((StgInd *)p)->indirectee);
607 p += sizeofW(StgInd);
608 break;
609
610 case MUT_VAR_CLEAN:
611 case MUT_VAR_DIRTY:
612 gct->eager_promotion = false;
613 evacuate(&((StgMutVar *)p)->var);
614 gct->eager_promotion = saved_eager_promotion;
615
616 if (gct->failed_to_evac) {
617 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
618 } else {
619 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
620 }
621 p += sizeofW(StgMutVar);
622 break;
623
624 case BLOCKING_QUEUE:
625 {
626 StgBlockingQueue *bq = (StgBlockingQueue *)p;
627
628 gct->eager_promotion = false;
629 evacuate(&bq->bh);
630 evacuate((StgClosure**)&bq->owner);
631 evacuate((StgClosure**)&bq->queue);
632 evacuate((StgClosure**)&bq->link);
633 gct->eager_promotion = saved_eager_promotion;
634
635 if (gct->failed_to_evac) {
636 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
637 } else {
638 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
639 }
640 p += sizeofW(StgBlockingQueue);
641 break;
642 }
643
644 case THUNK_SELECTOR:
645 {
646 StgSelector *s = (StgSelector *)p;
647 evacuate(&s->selectee);
648 p += THUNK_SELECTOR_sizeW();
649 break;
650 }
651
652 // A chunk of stack saved in a heap object
653 case AP_STACK:
654 {
655 StgAP_STACK *ap = (StgAP_STACK *)p;
656
657 evacuate(&ap->fun);
658 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
659 p = (StgPtr)ap->payload + ap->size;
660 break;
661 }
662
663 case PAP:
664 p = scavenge_PAP((StgPAP *)p);
665 break;
666
667 case AP:
668 p = scavenge_AP((StgAP *)p);
669 break;
670
671 case ARR_WORDS:
672 // nothing to follow
673 p += arr_words_sizeW((StgArrBytes *)p);
674 break;
675
676 case MUT_ARR_PTRS_CLEAN:
677 case MUT_ARR_PTRS_DIRTY:
678 {
679 // We don't eagerly promote objects pointed to by a mutable
680 // array, but if we find the array only points to objects in
681 // the same or an older generation, we mark it "clean" and
682 // avoid traversing it during minor GCs.
683 gct->eager_promotion = false;
684
685 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
686
687 if (gct->failed_to_evac) {
688 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
689 } else {
690 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
691 }
692
693 gct->eager_promotion = saved_eager_promotion;
694 gct->failed_to_evac = true; // always put it on the mutable list.
695 break;
696 }
697
698 case MUT_ARR_PTRS_FROZEN_CLEAN:
699 case MUT_ARR_PTRS_FROZEN_DIRTY:
700 // follow everything
701 {
702 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
703
704 if (gct->failed_to_evac) {
705 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
706 } else {
707 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
708 }
709 break;
710 }
711
712 case SMALL_MUT_ARR_PTRS_CLEAN:
713 case SMALL_MUT_ARR_PTRS_DIRTY:
714 // follow everything
715 {
716 StgPtr next;
717
718 // We don't eagerly promote objects pointed to by a mutable
719 // array, but if we find the array only points to objects in
720 // the same or an older generation, we mark it "clean" and
721 // avoid traversing it during minor GCs.
722 gct->eager_promotion = false;
723 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
724 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
725 evacuate((StgClosure **)p);
726 }
727 gct->eager_promotion = saved_eager_promotion;
728
729 if (gct->failed_to_evac) {
730 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
731 } else {
732 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
733 }
734
735 gct->failed_to_evac = true; // always put it on the mutable list.
736 break;
737 }
738
739 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
740 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
741 // follow everything
742 {
743 StgPtr next;
744
745 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
746 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
747 evacuate((StgClosure **)p);
748 }
749
750 if (gct->failed_to_evac) {
751 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
752 } else {
753 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
754 }
755 break;
756 }
757
758 case TSO:
759 {
760 scavengeTSO((StgTSO *)p);
761 p += sizeofW(StgTSO);
762 break;
763 }
764
765 case STACK:
766 {
767 StgStack *stack = (StgStack*)p;
768
769 gct->eager_promotion = false;
770
771 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
772 stack->dirty = gct->failed_to_evac;
773 p += stack_sizeW(stack);
774
775 gct->eager_promotion = saved_eager_promotion;
776 break;
777 }
778
779 case MUT_PRIM:
780 {
781 StgPtr end;
782
783 gct->eager_promotion = false;
784
785 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
786 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
787 evacuate((StgClosure **)p);
788 }
789 p += info->layout.payload.nptrs;
790
791 gct->eager_promotion = saved_eager_promotion;
792 gct->failed_to_evac = true; // mutable
793 break;
794 }
795
796 case TREC_CHUNK:
797 {
798 StgWord i;
799 StgTRecChunk *tc = ((StgTRecChunk *) p);
800 TRecEntry *e = &(tc -> entries[0]);
801 gct->eager_promotion = false;
802 evacuate((StgClosure **)&tc->prev_chunk);
803 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
804 evacuate((StgClosure **)&e->tvar);
805 evacuate((StgClosure **)&e->expected_value);
806 evacuate((StgClosure **)&e->new_value);
807 }
808 gct->eager_promotion = saved_eager_promotion;
809 gct->failed_to_evac = true; // mutable
810 p += sizeofW(StgTRecChunk);
811 break;
812 }
813
814 default:
815 barf("scavenge: unimplemented/strange closure type %d @ %p",
816 info->type, p);
817 }
818
819 /*
820 * We need to record the current object on the mutable list if
821 * (a) It is actually mutable, or
822 * (b) It contains pointers to a younger generation.
823 * Case (b) arises if we didn't manage to promote everything that
824 * the current object points to into the current generation.
825 */
826 if (gct->failed_to_evac) {
827 gct->failed_to_evac = false;
828 if (bd->gen_no > 0) {
829 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
830 }
831 }
832 }
833
834 if (p > bd->free) {
835 gct->copied += ws->todo_free - bd->free;
836 bd->free = p;
837 }
838
839 debugTrace(DEBUG_gc, " scavenged %ld bytes",
840 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
841
842 // update stats: this is a block that has been scavenged
843 gct->scanned += bd->free - bd->u.scan;
844 bd->u.scan = bd->free;
845
846 if (bd != ws->todo_bd) {
847 // we're not going to evac any more objects into
848 // this block, so push it now.
849 push_scanned_block(bd, ws);
850 }
851
852 gct->scan_bd = NULL;
853 }
854 /* -----------------------------------------------------------------------------
855 Scavenge everything on the mark stack.
856
857 This is slightly different from scavenge():
858 - we don't walk linearly through the objects, so the scavenger
859 doesn't need to advance the pointer on to the next object.
860 -------------------------------------------------------------------------- */
861
862 static void
863 scavenge_mark_stack(void)
864 {
865 StgPtr p, q;
866 const StgInfoTable *info;
867 bool saved_eager_promotion;
868
869 gct->evac_gen_no = oldest_gen->no;
870 saved_eager_promotion = gct->eager_promotion;
871
872 while ((p = pop_mark_stack())) {
873
874 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
875 info = get_itbl((StgClosure *)p);
876
877 q = p;
878 switch (info->type) {
879
880 case MVAR_CLEAN:
881 case MVAR_DIRTY:
882 {
883 StgMVar *mvar = ((StgMVar *)p);
884 gct->eager_promotion = false;
885 evacuate((StgClosure **)&mvar->head);
886 evacuate((StgClosure **)&mvar->tail);
887 evacuate((StgClosure **)&mvar->value);
888 gct->eager_promotion = saved_eager_promotion;
889
890 if (gct->failed_to_evac) {
891 mvar->header.info = &stg_MVAR_DIRTY_info;
892 } else {
893 mvar->header.info = &stg_MVAR_CLEAN_info;
894 }
895 break;
896 }
897
898 case TVAR:
899 {
900 StgTVar *tvar = ((StgTVar *)p);
901 gct->eager_promotion = false;
902 evacuate((StgClosure **)&tvar->current_value);
903 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
904 gct->eager_promotion = saved_eager_promotion;
905
906 if (gct->failed_to_evac) {
907 tvar->header.info = &stg_TVAR_DIRTY_info;
908 } else {
909 tvar->header.info = &stg_TVAR_CLEAN_info;
910 }
911 break;
912 }
913
914 case FUN_2_0:
915 scavenge_fun_srt(info);
916 evacuate(&((StgClosure *)p)->payload[1]);
917 evacuate(&((StgClosure *)p)->payload[0]);
918 break;
919
920 case THUNK_2_0:
921 scavenge_thunk_srt(info);
922 evacuate(&((StgThunk *)p)->payload[1]);
923 evacuate(&((StgThunk *)p)->payload[0]);
924 break;
925
926 case CONSTR_2_0:
927 evacuate(&((StgClosure *)p)->payload[1]);
928 evacuate(&((StgClosure *)p)->payload[0]);
929 break;
930
931 case FUN_1_0:
932 case FUN_1_1:
933 scavenge_fun_srt(info);
934 evacuate(&((StgClosure *)p)->payload[0]);
935 break;
936
937 case THUNK_1_0:
938 case THUNK_1_1:
939 scavenge_thunk_srt(info);
940 evacuate(&((StgThunk *)p)->payload[0]);
941 break;
942
943 case CONSTR_1_0:
944 case CONSTR_1_1:
945 evacuate(&((StgClosure *)p)->payload[0]);
946 break;
947
948 case FUN_0_1:
949 case FUN_0_2:
950 scavenge_fun_srt(info);
951 break;
952
953 case THUNK_0_1:
954 case THUNK_0_2:
955 scavenge_thunk_srt(info);
956 break;
957
958 case CONSTR_0_1:
959 case CONSTR_0_2:
960 break;
961
962 case FUN:
963 scavenge_fun_srt(info);
964 goto gen_obj;
965
966 case THUNK:
967 {
968 StgPtr end;
969
970 scavenge_thunk_srt(info);
971 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
972 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
973 evacuate((StgClosure **)p);
974 }
975 break;
976 }
977
978 gen_obj:
979 case CONSTR:
980 case CONSTR_NOCAF:
981 case WEAK:
982 case PRIM:
983 {
984 StgPtr end;
985
986 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
987 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
988 evacuate((StgClosure **)p);
989 }
990 break;
991 }
992
993 case BCO: {
994 StgBCO *bco = (StgBCO *)p;
995 evacuate((StgClosure **)&bco->instrs);
996 evacuate((StgClosure **)&bco->literals);
997 evacuate((StgClosure **)&bco->ptrs);
998 break;
999 }
1000
1001 case IND:
1002 case BLACKHOLE:
1003 evacuate(&((StgInd *)p)->indirectee);
1004 break;
1005
1006 case MUT_VAR_CLEAN:
1007 case MUT_VAR_DIRTY: {
1008 gct->eager_promotion = false;
1009 evacuate(&((StgMutVar *)p)->var);
1010 gct->eager_promotion = saved_eager_promotion;
1011
1012 if (gct->failed_to_evac) {
1013 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1014 } else {
1015 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1016 }
1017 break;
1018 }
1019
1020 case BLOCKING_QUEUE:
1021 {
1022 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1023
1024 gct->eager_promotion = false;
1025 evacuate(&bq->bh);
1026 evacuate((StgClosure**)&bq->owner);
1027 evacuate((StgClosure**)&bq->queue);
1028 evacuate((StgClosure**)&bq->link);
1029 gct->eager_promotion = saved_eager_promotion;
1030
1031 if (gct->failed_to_evac) {
1032 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1033 } else {
1034 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1035 }
1036 break;
1037 }
1038
1039 case ARR_WORDS:
1040 break;
1041
1042 case THUNK_SELECTOR:
1043 {
1044 StgSelector *s = (StgSelector *)p;
1045 evacuate(&s->selectee);
1046 break;
1047 }
1048
1049 // A chunk of stack saved in a heap object
1050 case AP_STACK:
1051 {
1052 StgAP_STACK *ap = (StgAP_STACK *)p;
1053
1054 evacuate(&ap->fun);
1055 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1056 break;
1057 }
1058
1059 case PAP:
1060 scavenge_PAP((StgPAP *)p);
1061 break;
1062
1063 case AP:
1064 scavenge_AP((StgAP *)p);
1065 break;
1066
1067 case MUT_ARR_PTRS_CLEAN:
1068 case MUT_ARR_PTRS_DIRTY:
1069 // follow everything
1070 {
1071 // We don't eagerly promote objects pointed to by a mutable
1072 // array, but if we find the array only points to objects in
1073 // the same or an older generation, we mark it "clean" and
1074 // avoid traversing it during minor GCs.
1075 gct->eager_promotion = false;
1076
1077 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1078
1079 if (gct->failed_to_evac) {
1080 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1081 } else {
1082 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1083 }
1084
1085 gct->eager_promotion = saved_eager_promotion;
1086 gct->failed_to_evac = true; // mutable anyhow.
1087 break;
1088 }
1089
1090 case MUT_ARR_PTRS_FROZEN_CLEAN:
1091 case MUT_ARR_PTRS_FROZEN_DIRTY:
1092 // follow everything
1093 {
1094 StgPtr q = p;
1095
1096 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1097
1098 if (gct->failed_to_evac) {
1099 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
1100 } else {
1101 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
1102 }
1103 break;
1104 }
1105
1106 case SMALL_MUT_ARR_PTRS_CLEAN:
1107 case SMALL_MUT_ARR_PTRS_DIRTY:
1108 // follow everything
1109 {
1110 StgPtr next;
1111 bool saved_eager;
1112
1113 // We don't eagerly promote objects pointed to by a mutable
1114 // array, but if we find the array only points to objects in
1115 // the same or an older generation, we mark it "clean" and
1116 // avoid traversing it during minor GCs.
1117 saved_eager = gct->eager_promotion;
1118 gct->eager_promotion = false;
1119 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1120 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1121 evacuate((StgClosure **)p);
1122 }
1123 gct->eager_promotion = saved_eager;
1124
1125 if (gct->failed_to_evac) {
1126 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1127 } else {
1128 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1129 }
1130
1131 gct->failed_to_evac = true; // mutable anyhow.
1132 break;
1133 }
1134
1135 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
1136 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
1137 // follow everything
1138 {
1139 StgPtr next, q = p;
1140
1141 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1142 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1143 evacuate((StgClosure **)p);
1144 }
1145
1146 if (gct->failed_to_evac) {
1147 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
1148 } else {
1149 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
1150 }
1151 break;
1152 }
1153
1154 case TSO:
1155 {
1156 scavengeTSO((StgTSO*)p);
1157 break;
1158 }
1159
1160 case STACK:
1161 {
1162 StgStack *stack = (StgStack*)p;
1163
1164 gct->eager_promotion = false;
1165
1166 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1167 stack->dirty = gct->failed_to_evac;
1168
1169 gct->eager_promotion = saved_eager_promotion;
1170 break;
1171 }
1172
1173 case MUT_PRIM:
1174 {
1175 StgPtr end;
1176
1177 gct->eager_promotion = false;
1178
1179 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1180 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1181 evacuate((StgClosure **)p);
1182 }
1183
1184 gct->eager_promotion = saved_eager_promotion;
1185 gct->failed_to_evac = true; // mutable
1186 break;
1187 }
1188
1189 case TREC_CHUNK:
1190 {
1191 StgWord i;
1192 StgTRecChunk *tc = ((StgTRecChunk *) p);
1193 TRecEntry *e = &(tc -> entries[0]);
1194 gct->eager_promotion = false;
1195 evacuate((StgClosure **)&tc->prev_chunk);
1196 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1197 evacuate((StgClosure **)&e->tvar);
1198 evacuate((StgClosure **)&e->expected_value);
1199 evacuate((StgClosure **)&e->new_value);
1200 }
1201 gct->eager_promotion = saved_eager_promotion;
1202 gct->failed_to_evac = true; // mutable
1203 break;
1204 }
1205
1206 default:
1207 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1208 info->type, p);
1209 }
1210
1211 if (gct->failed_to_evac) {
1212 gct->failed_to_evac = false;
1213 if (gct->evac_gen_no) {
1214 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1215 }
1216 }
1217 } // while (p = pop_mark_stack())
1218 }
1219
1220 /* -----------------------------------------------------------------------------
1221 Scavenge one object.
1222
1223 This is used for objects that are temporarily marked as mutable
1224 because they contain old-to-new generation pointers. Only certain
1225 objects can have this property.
1226 -------------------------------------------------------------------------- */
1227
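// Returns true if something could not be evacuated to the desired generation
// (gct->failed_to_evac), in which case the caller keeps the object on the
// mutable list.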
1228 static bool
1229 scavenge_one(StgPtr p)
1230 {
1231 const StgInfoTable *info;
1232 bool no_luck;
1233 bool saved_eager_promotion;
1234
1235 saved_eager_promotion = gct->eager_promotion;
1236
1237 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1238 info = get_itbl((StgClosure *)p);
1239
1240 switch (info->type) {
1241
1242 case MVAR_CLEAN:
1243 case MVAR_DIRTY:
1244 {
1245 StgMVar *mvar = ((StgMVar *)p);
1246 gct->eager_promotion = false;
1247 evacuate((StgClosure **)&mvar->head);
1248 evacuate((StgClosure **)&mvar->tail);
1249 evacuate((StgClosure **)&mvar->value);
1250 gct->eager_promotion = saved_eager_promotion;
1251
1252 if (gct->failed_to_evac) {
1253 mvar->header.info = &stg_MVAR_DIRTY_info;
1254 } else {
1255 mvar->header.info = &stg_MVAR_CLEAN_info;
1256 }
1257 break;
1258 }
1259
1260 case TVAR:
1261 {
1262 StgTVar *tvar = ((StgTVar *)p);
1263 gct->eager_promotion = false;
1264 evacuate((StgClosure **)&tvar->current_value);
1265 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1266 gct->eager_promotion = saved_eager_promotion;
1267
1268 if (gct->failed_to_evac) {
1269 tvar->header.info = &stg_TVAR_DIRTY_info;
1270 } else {
1271 tvar->header.info = &stg_TVAR_CLEAN_info;
1272 }
1273 break;
1274 }
1275
1276 case THUNK:
1277 case THUNK_1_0:
1278 case THUNK_0_1:
1279 case THUNK_1_1:
1280 case THUNK_0_2:
1281 case THUNK_2_0:
1282 {
1283 StgPtr q, end;
1284
1285 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1286 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1287 evacuate((StgClosure **)q);
1288 }
1289 break;
1290 }
1291
1292 case FUN:
1293 case FUN_1_0: // hardly worth specialising these guys
1294 case FUN_0_1:
1295 case FUN_1_1:
1296 case FUN_0_2:
1297 case FUN_2_0:
1298 case CONSTR:
1299 case CONSTR_NOCAF:
1300 case CONSTR_1_0:
1301 case CONSTR_0_1:
1302 case CONSTR_1_1:
1303 case CONSTR_0_2:
1304 case CONSTR_2_0:
1305 case PRIM:
1306 {
1307 StgPtr q, end;
1308
1309 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1310 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1311 evacuate((StgClosure **)q);
1312 }
1313 break;
1314 }
1315
1316 case WEAK:
1317 // This WEAK object will not be considered by tidyWeakList during this
1318 // collection because it is in a generation > N, but it is on the
1319 // mutable list so we must evacuate all of its pointers because some
1320 // of them may point into a younger generation.
1321 scavengeLiveWeak((StgWeak *)p);
1322 break;
1323
1324 case MUT_VAR_CLEAN:
1325 case MUT_VAR_DIRTY: {
1326 StgPtr q = p;
1327
1328 gct->eager_promotion = false;
1329 evacuate(&((StgMutVar *)p)->var);
1330 gct->eager_promotion = saved_eager_promotion;
1331
1332 if (gct->failed_to_evac) {
1333 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1334 } else {
1335 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1336 }
1337 break;
1338 }
1339
1340 case BLOCKING_QUEUE:
1341 {
1342 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1343
1344 gct->eager_promotion = false;
1345 evacuate(&bq->bh);
1346 evacuate((StgClosure**)&bq->owner);
1347 evacuate((StgClosure**)&bq->queue);
1348 evacuate((StgClosure**)&bq->link);
1349 gct->eager_promotion = saved_eager_promotion;
1350
1351 if (gct->failed_to_evac) {
1352 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1353 } else {
1354 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1355 }
1356 break;
1357 }
1358
1359 case THUNK_SELECTOR:
1360 {
1361 StgSelector *s = (StgSelector *)p;
1362 evacuate(&s->selectee);
1363 break;
1364 }
1365
1366 case AP_STACK:
1367 {
1368 StgAP_STACK *ap = (StgAP_STACK *)p;
1369
1370 evacuate(&ap->fun);
1371 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1372 p = (StgPtr)ap->payload + ap->size;
1373 break;
1374 }
1375
1376 case PAP:
1377 p = scavenge_PAP((StgPAP *)p);
1378 break;
1379
1380 case AP:
1381 p = scavenge_AP((StgAP *)p);
1382 break;
1383
1384 case ARR_WORDS:
1385 // nothing to follow
1386 break;
1387
1388 case MUT_ARR_PTRS_CLEAN:
1389 case MUT_ARR_PTRS_DIRTY:
1390 {
1391 // We don't eagerly promote objects pointed to by a mutable
1392 // array, but if we find the array only points to objects in
1393 // the same or an older generation, we mark it "clean" and
1394 // avoid traversing it during minor GCs.
1395 gct->eager_promotion = false;
1396
1397 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1398
1399 if (gct->failed_to_evac) {
1400 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1401 } else {
1402 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1403 }
1404
1405 gct->eager_promotion = saved_eager_promotion;
1406 gct->failed_to_evac = true;
1407 break;
1408 }
1409
1410 case MUT_ARR_PTRS_FROZEN_CLEAN:
1411 case MUT_ARR_PTRS_FROZEN_DIRTY:
1412 {
1413 // follow everything
1414 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1415
1416 if (gct->failed_to_evac) {
1417 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
1418 } else {
1419 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
1420 }
1421 break;
1422 }
1423
1424 case SMALL_MUT_ARR_PTRS_CLEAN:
1425 case SMALL_MUT_ARR_PTRS_DIRTY:
1426 {
1427 StgPtr next, q;
1428 bool saved_eager;
1429
1430 // We don't eagerly promote objects pointed to by a mutable
1431 // array, but if we find the array only points to objects in
1432 // the same or an older generation, we mark it "clean" and
1433 // avoid traversing it during minor GCs.
1434 saved_eager = gct->eager_promotion;
1435 gct->eager_promotion = false;
1436 q = p;
1437 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1438 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1439 evacuate((StgClosure **)p);
1440 }
1441 gct->eager_promotion = saved_eager;
1442
1443 if (gct->failed_to_evac) {
1444 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1445 } else {
1446 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1447 }
1448
1449 gct->failed_to_evac = true;
1450 break;
1451 }
1452
1453 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
1454 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
1455 {
1456 // follow everything
1457 StgPtr next, q=p;
1458
1459 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1460 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1461 evacuate((StgClosure **)p);
1462 }
1463
1464 if (gct->failed_to_evac) {
1465 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
1466 } else {
1467 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
1468 }
1469 break;
1470 }
1471
1472 case TSO:
1473 {
1474 scavengeTSO((StgTSO*)p);
1475 break;
1476 }
1477
1478 case STACK:
1479 {
1480 StgStack *stack = (StgStack*)p;
1481
1482 gct->eager_promotion = false;
1483
1484 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1485 stack->dirty = gct->failed_to_evac;
1486
1487 gct->eager_promotion = saved_eager_promotion;
1488 break;
1489 }
1490
1491 case MUT_PRIM:
1492 {
1493 StgPtr end;
1494
1495 gct->eager_promotion = false;
1496
1497 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1498 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1499 evacuate((StgClosure **)p);
1500 }
1501
1502 gct->eager_promotion = saved_eager_promotion;
1503 gct->failed_to_evac = true; // mutable
1504 break;
1505
1506 }
1507
1508 case TREC_CHUNK:
1509 {
1510 StgWord i;
1511 StgTRecChunk *tc = ((StgTRecChunk *) p);
1512 TRecEntry *e = &(tc -> entries[0]);
1513 gct->eager_promotion = false;
1514 evacuate((StgClosure **)&tc->prev_chunk);
1515 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1516 evacuate((StgClosure **)&e->tvar);
1517 evacuate((StgClosure **)&e->expected_value);
1518 evacuate((StgClosure **)&e->new_value);
1519 }
1520 gct->eager_promotion = saved_eager_promotion;
1521 gct->failed_to_evac = true; // mutable
1522 break;
1523 }
1524
1525 case IND:
1526 // IND can happen, for example, when the interpreter allocates
1527 // a gigantic AP closure (more than one block), which ends up
1528 // on the large-object list and then gets updated. See #3424.
1529 case BLACKHOLE:
1530 case IND_STATIC:
1531 evacuate(&((StgInd *)p)->indirectee);
1532
1533 #if 0 && defined(DEBUG)
1534 if (RtsFlags.DebugFlags.gc)
1535 /* Debugging code to print out the size of the thing we just
1536 * promoted
1537 */
1538 {
1539 StgPtr start = gen->scan;
1540 bdescr *start_bd = gen->scan_bd;
1541 StgWord size = 0;
1542 scavenge(&gen);
1543 if (start_bd != gen->scan_bd) {
1544 size += (P_)BLOCK_ROUND_UP(start) - start;
1545 start_bd = start_bd->link;
1546 while (start_bd != gen->scan_bd) {
1547 size += BLOCK_SIZE_W;
1548 start_bd = start_bd->link;
1549 }
1550 size += gen->scan -
1551 (P_)BLOCK_ROUND_DOWN(gen->scan);
1552 } else {
1553 size = gen->scan - start;
1554 }
1555 debugBelch("evac IND: %ld bytes", size * sizeof(W_));
1556 }
1557 #endif
1558 break;
1559
1560 case COMPACT_NFDATA:
1561 scavenge_compact((StgCompactNFData*)p);
1562 break;
1563
1564 default:
1565 barf("scavenge_one: strange object %d", (int)(info->type));
1566 }
1567
1568 no_luck = gct->failed_to_evac;
1569 gct->failed_to_evac = false;
1570 return (no_luck);
1571 }
1572
1573 /* -----------------------------------------------------------------------------
1574 Scavenging mutable lists.
1575
1576 We treat the mutable list of each generation > N (i.e. all the
1577 generations older than the one being collected) as roots. We also
1578 remove non-mutable objects from the mutable list at this point.
1579 -------------------------------------------------------------------------- */
1580
1581 static void
1582 scavenge_mutable_list(bdescr *bd, generation *gen)
1583 {
1584 StgPtr p, q;
1585
1586 uint32_t gen_no = gen->no;
1587 gct->evac_gen_no = gen_no;
1588
1589 for (; bd != NULL; bd = bd->link) {
1590 for (q = bd->start; q < bd->free; q++) {
1591 p = (StgPtr)*q;
1592 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1593
1594 #if defined(DEBUG)
1595 const StgInfoTable *pinfo;
1596 switch (get_itbl((StgClosure *)p)->type) {
1597 case MUT_VAR_CLEAN:
1598 // can happen due to concurrent writeMutVars
1599 case MUT_VAR_DIRTY:
1600 mutlist_MUTVARS++; break;
1601 case MUT_ARR_PTRS_CLEAN:
1602 case MUT_ARR_PTRS_DIRTY:
1603 case MUT_ARR_PTRS_FROZEN_CLEAN:
1604 case MUT_ARR_PTRS_FROZEN_DIRTY:
1605 mutlist_MUTARRS++; break;
1606 case MVAR_CLEAN:
1607 barf("MVAR_CLEAN on mutable list");
1608 case MVAR_DIRTY:
1609 mutlist_MVARS++; break;
1610 case TVAR:
1611 mutlist_TVAR++; break;
1612 case TREC_CHUNK:
1613 mutlist_TREC_CHUNK++; break;
1614 case MUT_PRIM:
1615 pinfo = ((StgClosure*)p)->header.info;
1616 if (pinfo == &stg_TVAR_WATCH_QUEUE_info)
1617 mutlist_TVAR_WATCH_QUEUE++;
1618 else if (pinfo == &stg_TREC_HEADER_info)
1619 mutlist_TREC_HEADER++;
1620 else
1621 mutlist_OTHERS++;
1622 break;
1623 default:
1624 mutlist_OTHERS++; break;
1625 }
1626 #endif
1627
1628 // Check whether this object is "clean", that is, it
1629 // definitely doesn't point into a young generation.
1630 // Clean objects don't need to be scavenged. Some clean
1631 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1632 // list at all; others, such as MUT_ARR_PTRS,
1633 // are always on the mutable list.
1634 //
1635 switch (get_itbl((StgClosure *)p)->type) {
1636 case MUT_ARR_PTRS_CLEAN:
1637 case SMALL_MUT_ARR_PTRS_CLEAN:
1638 recordMutableGen_GC((StgClosure *)p,gen_no);
1639 continue;
1640 case MUT_ARR_PTRS_DIRTY:
1641 {
1642 bool saved_eager_promotion;
1643 saved_eager_promotion = gct->eager_promotion;
1644 gct->eager_promotion = false;
1645
1646 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1647
1648 if (gct->failed_to_evac) {
1649 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1650 } else {
1651 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1652 }
1653
1654 gct->eager_promotion = saved_eager_promotion;
1655 gct->failed_to_evac = false;
1656 recordMutableGen_GC((StgClosure *)p,gen_no);
1657 continue;
1658 }
1659 default:
1660 ;
1661 }
1662
1663 if (RtsFlags.GcFlags.useNonmoving && major_gc && gen == oldest_gen) {
1664 // We can't use scavenge_one here as we need to scavenge SRTs
1665 nonmovingScavengeOne((StgClosure *)p);
1666 } else if (scavenge_one(p)) {
1667 // didn't manage to promote everything, so put the
1668 // object back on the list.
1669 recordMutableGen_GC((StgClosure *)p,gen_no);
1670 }
1671 }
1672 }
1673 }
1674
1675 void
1676 scavenge_capability_mut_lists (Capability *cap)
1677 {
1678 // In a major GC, only the nonmoving heap's mutable list is a root.
1679 if (RtsFlags.GcFlags.useNonmoving && major_gc) {
1680 uint32_t g = oldest_gen->no;
1681 scavenge_mutable_list(cap->saved_mut_lists[g], oldest_gen);
1682 freeChain_sync(cap->saved_mut_lists[g]);
1683 cap->saved_mut_lists[g] = NULL;
1684 return;
1685 }
1686
1687 /* Mutable lists from each generation > N
1688 * we want to *scavenge* these roots, not evacuate them: they're not
1689 * going to move in this GC.
1690 * Also do them in reverse generation order, for the usual reason:
1691 * namely to reduce the likelihood of spurious old->new pointers.
1692 */
1693 for (uint32_t g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1694 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1695 freeChain_sync(cap->saved_mut_lists[g]);
1696 cap->saved_mut_lists[g] = NULL;
1697 }
1698 }
1699
1700 /* -----------------------------------------------------------------------------
1701 Scavenging the static objects.
1702
1703 These are the static objects that were evacuated (i.e. reached) during
1704 this major GC: each is taken off gct->static_objects, has its pointer
1705 fields evacuated, and is moved to gct->scavenged_static_objects.
1706 -------------------------------------------------------------------------- */
1707
1708 static void
1709 scavenge_static(void)
1710 {
1711 StgClosure *flagged_p, *p;
1712 const StgInfoTable *info;
1713
1714 debugTrace(DEBUG_gc, "scavenging static objects");
1715
1716 /* Always evacuate straight to the oldest generation for static
1717 * objects */
1718 gct->evac_gen_no = oldest_gen->no;
1719
1720 /* keep going until we've scavenged all the objects on the linked
1721 list... */
1722
1723 while (1) {
1724
1725 /* get the next static object from the list. Remember, there might
1726 * be more stuff on this list after each evacuation...
1727 * (static_objects is a global)
1728 */
1729 flagged_p = gct->static_objects;
1730 if (flagged_p == END_OF_STATIC_OBJECT_LIST) {
1731 break;
1732 }
1733 p = UNTAG_STATIC_LIST_PTR(flagged_p);
1734
1735 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1736 info = get_itbl(p);
1737 // make sure the info pointer is into text space
1738
1739 /* Take this object *off* the static_objects list,
1740 * and put it on the scavenged_static_objects list.
1741 */
1742 gct->static_objects = *STATIC_LINK(info,p);
1743 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1744 gct->scavenged_static_objects = flagged_p;
1745
1746 switch (info -> type) {
1747
1748 case IND_STATIC:
1749 {
1750 StgInd *ind = (StgInd *)p;
1751 evacuate(&ind->indirectee);
1752
1753 /* might fail to evacuate it, in which case we have to pop it
1754 * back on the mutable list of the oldest generation. We
1755 * leave it *on* the scavenged_static_objects list, though,
1756 * in case we visit this object again.
1757 */
1758 if (gct->failed_to_evac) {
1759 gct->failed_to_evac = false;
1760 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1761 }
1762 break;
1763 }
1764
1765 case THUNK_STATIC:
1766 scavenge_thunk_srt(info);
1767 break;
1768
1769 case FUN_STATIC:
1770 scavenge_fun_srt(info);
1771 FALLTHROUGH;
1772
1773 // a FUN_STATIC can also be an SRT, so it may have pointer
1774 // fields. See Note [SRTs] in CmmBuildInfoTables, specifically
1775 // the [FUN] optimisation.
1776
1777 case CONSTR:
1778 case CONSTR_NOCAF:
1779 case CONSTR_1_0:
1780 case CONSTR_0_1:
1781 case CONSTR_2_0:
1782 case CONSTR_1_1:
1783 case CONSTR_0_2:
1784 {
1785 StgPtr q, next;
1786
1787 next = (P_)p->payload + info->layout.payload.ptrs;
1788 // evacuate the pointers
1789 for (q = (P_)p->payload; q < next; q++) {
1790 evacuate((StgClosure **)q);
1791 }
1792 break;
1793 }
1794
1795 default:
1796 barf("scavenge_static: strange closure %d", (int)(info->type));
1797 }
1798
1799 ASSERT(gct->failed_to_evac == false);
1800 }
1801 }
1802
1803 /* -----------------------------------------------------------------------------
1804 scavenge a chunk of memory described by a bitmap
1805 -------------------------------------------------------------------------- */
1806
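// Large bitmaps use the same convention as small ones (a 0 bit marks a
// pointer field); walk_large_bitmap applies do_evacuate (defined above) to
// each pointer field it finds.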
1807 static void
1808 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
1809 {
1810 walk_large_bitmap(do_evacuate, (StgClosure **) p, large_bitmap, size, NULL);
1811 }
1812
1813
1814 /* -----------------------------------------------------------------------------
1815 scavenge_stack walks over a section of stack and evacuates all the
1816 objects pointed to by it. We can use the same code for walking
1817 AP_STACK_UPDs, since these are just sections of copied stack.
1818 -------------------------------------------------------------------------- */
1819
1820 void
1821 scavenge_stack(StgPtr p, StgPtr stack_end)
1822 {
1823 const StgRetInfoTable* info;
1824 StgWord bitmap;
1825 StgWord size;
1826
1827 /*
1828 * Each time around this loop, we are looking at a chunk of stack
1829 * that starts with an activation record.
1830 */
1831
1832 while (p < stack_end) {
1833 info = get_ret_itbl((StgClosure *)p);
1834
1835 switch (info->i.type) {
1836
1837 case UPDATE_FRAME:
1838 // Note [upd-black-hole]
1839 //
1840 // In SMP, we can get update frames that point to indirections
1841 // when two threads evaluate the same thunk. We do attempt to
1842 // discover this situation in threadPaused(), but it's
1843 // possible that the following sequence occurs:
1844 //
1845 // A B
1846 // enter T
1847 // enter T
1848 // blackhole T
1849 // update T
1850 // GC
1851 //
1852 // Now T is an indirection, and the update frame is already
1853 // marked on A's stack, so we won't traverse it again in
1854 // threadPaused(). We could traverse the whole stack again
1855 // before GC, but that would be too expensive.
1856 //
1857 // Scavenging this update frame as normal would be disastrous;
1858 // the indirection will be shorted out, and the updatee would
1859 // end up pointing to the value. The update code will then
1860 // overwrite the value, instead of the BLACKHOLE it is
1861 // expecting to write to.
1862 //
1863 // One way we could try to fix this is to detect when the
1864 // BLACKHOLE has been updated by another thread, and then
1865 // replace this update frame with a special frame that just
1866 // enters the value. But this introduces some other
1867 // complexities:
1868 //
1869 // - we must be careful to call checkBlockingQueues() in this
1870 // special frame, because we might otherwise miss wakeups
1871 // for threads that blocked on the original BLACKHOLE,
1872 // - we must spot this frame when we're stripping the stack in
1873 // raiseAsync() and raiseExceptionHelper(), and arrange to call
1874 // checkBlockingQueues() there too.
1875 //
1876 // This is hard to get right, indeed we previously got it
1877 // wrong (see #13751). So we now take a different approach:
1878 // always copy the BLACKHOLE, even if it is actually an
1879 // indirection. This way we keep the update frame, we're
1880 // guaranteed to still perform the update, and check for
1881 // missed wakeups even when stripping the stack in
1882 // raiseAsync() and raiseExceptionHelper(). This is also a
1883 // little more efficient, because evacuating a known BLACKHOLE
1884 // is faster than evacuating an unknown closure.
1885 //
1886 // NOTE: for the reasons above, blackholing (either lazy or
1887 // eager) is NOT optional. See also Note [avoiding
1888 // threadPaused] in Interpreter.c.
1889 //
1890 // There are a couple of alternative solutions:
1891 // - if we see an update frame that points to an indirection,
1892 // arrange to call checkBlockingQueues() on that thread
1893 // after GC.
1894 // - spot a BLOCKING_QUEUE that points to a value and
1895 // arrange to wake it up after the GC.
1896 //
1897 // These are more difficult to implement, requiring an extra
1898 // list to be maintained during GC. They also rely on more
1899 // subtle invariants than the solution implemented here.
1900 //
1901
1902 {
1903 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1904
1905 evacuate_BLACKHOLE(&frame->updatee);
1906 p += sizeofW(StgUpdateFrame);
1907 continue;
1908 }
1909
1910 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1911 case CATCH_STM_FRAME:
1912 case CATCH_RETRY_FRAME:
1913 case ATOMICALLY_FRAME:
1914 case UNDERFLOW_FRAME:
1915 case STOP_FRAME:
1916 case CATCH_FRAME:
1917 case RET_SMALL:
1918 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1919 size = BITMAP_SIZE(info->i.layout.bitmap);
1920 // NOTE: the payload starts immediately after the info-ptr, we
1921 // don't have an StgHeader in the same sense as a heap closure.
1922 p++;
1923 p = scavenge_small_bitmap(p, size, bitmap);
1924
1925 follow_srt:
1926 if (major_gc && info->i.srt) {
1927 StgClosure *srt = (StgClosure*)GET_SRT(info);
1928 evacuate(&srt);
1929 }
1930 continue;
1931
1932 case RET_BCO: {
1933 StgBCO *bco;
1934 StgWord size;
1935
1936 p++;
1937 evacuate((StgClosure **)p);
1938 bco = (StgBCO *)*p;
1939 p++;
1940 size = BCO_BITMAP_SIZE(bco);
1941 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1942 p += size;
1943 continue;
1944 }
1945
1946 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1947 case RET_BIG:
1948 {
1949 StgWord size;
1950
1951 size = GET_LARGE_BITMAP(&info->i)->size;
1952 p++;
1953 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1954 p += size;
1955 // and don't forget to follow the SRT
1956 goto follow_srt;
1957 }
1958
1959 case RET_FUN:
1960 {
1961 StgRetFun *ret_fun = (StgRetFun *)p;
1962 const StgFunInfoTable *fun_info;
1963
1964 evacuate(&ret_fun->fun);
1965 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1966 p = scavenge_arg_block(fun_info, ret_fun->payload);
1967 goto follow_srt;
1968 }
1969
1970 default:
1971 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1972 }
1973 }
1974 }
1975
1976 /*-----------------------------------------------------------------------------
1977 scavenge the large object list.
1978
1979 evac_gen_no is set by the caller; similar games are played with
1980 evac_gen_no as in scavenge_block() - see the comment at the top of
1981 scavenge_block(). Most large objects are (repeatedly) mutable, so most
1982 of the time evac_gen_no will be zero.
1983 --------------------------------------------------------------------------- */
1984
1985 static void
1986 scavenge_large (gen_workspace *ws)
1987 {
1988 bdescr *bd;
1989 StgPtr p;
1990
1991 gct->evac_gen_no = ws->gen->no;
1992
1993 bd = ws->todo_large_objects;
1994
1995 for (; bd != NULL; bd = ws->todo_large_objects) {
1996
1997 // take this object *off* the large objects list and put it on
1998 // the scavenged large objects list. This is so that we can
1999 // treat todo_large_objects as a stack and push new objects on
2000 // the front when evacuating.
2001 ws->todo_large_objects = bd->link;
2002
2003 ACQUIRE_SPIN_LOCK(&ws->gen->sync);
2004 if (bd->flags & BF_COMPACT) {
2005 dbl_link_onto(bd, &ws->gen->live_compact_objects);
2006 StgCompactNFData *str = ((StgCompactNFDataBlock*)bd->start)->owner;
2007 ws->gen->n_live_compact_blocks += str->totalW / BLOCK_SIZE_W;
2008 p = (StgPtr)str;
2009 } else {
2010 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
2011 ws->gen->n_scavenged_large_blocks += bd->blocks;
2012 p = bd->start;
2013 }
2014 RELEASE_SPIN_LOCK(&ws->gen->sync);
2015
2016 if (scavenge_one(p)) {
2017 if (ws->gen->no > 0) {
2018 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
2019 }
2020 }
2021
2022 // stats
2023 gct->scanned += closure_sizeW((StgClosure*)p);
2024 }
2025 }
2026
2027 /* ----------------------------------------------------------------------------
2028 Look for work to do.
2029
2030 We look for the oldest gen that has either a todo block that can
2031 be scanned, or a block of work on the global queue that we can
2032 scan.
2033
2034 It is important to take work from the *oldest* generation that
2035 has work available, because that minimizes the likelihood of
2036 evacuating objects into a young generation when they should have
2037 been eagerly promoted. This really does make a difference (the
2038 cacheprof benchmark is one that is affected).
2039
2040 We also want to scan the todo block if possible before grabbing
2041 work from the global queue, the reason being that we don't want to
2042 steal work from the global queue and starve other threads if there
2043 is other work we can usefully be doing.
2044 ------------------------------------------------------------------------- */
2045
2046 static bool
2047 scavenge_find_work (void)
2048 {
2049 int g;
2050 gen_workspace *ws;
2051 bool did_something, did_anything;
2052 bdescr *bd;
2053
2054 gct->scav_find_work++;
2055
2056 did_anything = false;
2057
2058 loop:
2059 did_something = false;
2060 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2061 ws = &gct->gens[g];
2062
2063 if (ws->todo_seg != END_NONMOVING_TODO_LIST) {
2064 struct NonmovingSegment *seg = ws->todo_seg;
2065 ASSERT(seg->todo_link);
2066 ws->todo_seg = seg->todo_link;
2067 seg->todo_link = NULL;
2068 scavengeNonmovingSegment(seg);
2069 did_something = true;
2070 break;
2071 }
2072
2073 gct->scan_bd = NULL;
2074
2075 // If we have a scan block with some work to do,
2076 // scavenge everything up to the free pointer.
2077 if (ws->todo_bd->u.scan < ws->todo_free)
2078 {
2079 scavenge_block(ws->todo_bd);
2080 did_something = true;
2081 break;
2082 }
2083
2084 // If we have any large objects to scavenge, do them now.
2085 if (ws->todo_large_objects) {
2086 scavenge_large(ws);
2087 did_something = true;
2088 break;
2089 }
2090
2091 if ((bd = grab_local_todo_block(ws)) != NULL) {
2092 scavenge_block(bd);
2093 did_something = true;
2094 break;
2095 }
2096 }
2097
2098 if (did_something) {
2099 did_anything = true;
2100 goto loop;
2101 }
2102
2103 #if defined(THREADED_RTS)
2104 if (work_stealing) {
2105 // look for work to steal
2106 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2107 if ((bd = steal_todo_block(g)) != NULL) {
2108 scavenge_block(bd);
2109 did_something = true;
2110 break;
2111 }
2112 }
2113
2114 if (did_something) {
2115 did_anything = true;
2116 goto loop;
2117 }
2118 }
2119 #endif
2120
2121 // only return when there is no more work to do
2122
2123 return did_anything;
2124 }
2125
2126 /* ----------------------------------------------------------------------------
2127 Scavenge until we can't find anything more to scavenge.
2128 ------------------------------------------------------------------------- */
2129
2130 void
2131 scavenge_loop(void)
2132 {
2133 bool work_to_do;
2134
2135 loop:
2136 work_to_do = false;
2137
2138 // scavenge static objects
2139 if (major_gc && gct->static_objects != END_OF_STATIC_OBJECT_LIST) {
2140 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
2141 scavenge_static();
2142 }
2143
2144 // scavenge objects in compacted generation
2145 if (mark_stack_bd != NULL && !mark_stack_empty()) {
2146 scavenge_mark_stack();
2147 work_to_do = true;
2148 }
2149
2150 // Order is important here: we want to deal in full blocks as
2151 // much as possible, so go for global work in preference to
2152 // local work. Only if all the global work has been exhausted
2153 // do we start scavenging the fragments of blocks in the local
2154 // workspaces.
2155 if (scavenge_find_work()) goto loop;
2156
2157 if (work_to_do) goto loop;
2158 }