1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2006
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "Rts.h"
15 #include "RtsFlags.h"
16 #include "Storage.h"
17 #include "MBlock.h"
18 #include "GC.h"
19 #include "GCUtils.h"
20 #include "Compact.h"
21 #include "Evac.h"
22 #include "Scav.h"
23 #include "Apply.h"
24 #include "Trace.h"
25 #include "LdvProfile.h"
26 #include "Sanity.h"
27
28 static void scavenge_stack (StgPtr p, StgPtr stack_end);
29
30 static void scavenge_large_bitmap (StgPtr p,
31 StgLargeBitmap *large_bitmap,
32 nat size );
33
34
35 /* Similar to scavenge_large_bitmap(), but used for SRTs: here a set
36 * bit marks an entry that must be evacuated, which is the opposite of
37 * the stack/argument bitmap convention. */
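/* The large bitmap is an array of StgWords: SRT entry i is controlled by
 * bit (i % BITS_IN(W_)) of word (i / BITS_IN(W_)). The loop below shifts
 * the current word right after each entry and reloads the next word at
 * every word boundary.
 */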
38 static void
39 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
40 {
41 nat i, b, size;
42 StgWord bitmap;
43 StgClosure **p;
44
45 b = 0;
46 bitmap = large_srt->l.bitmap[b];
47 size = (nat)large_srt->l.size;
48 p = (StgClosure **)large_srt->srt;
49 for (i = 0; i < size; ) {
50 if ((bitmap & 1) != 0) {
51 evacuate(p);
52 }
53 i++;
54 p++;
55 if (i % BITS_IN(W_) == 0) {
56 b++;
57 bitmap = large_srt->l.bitmap[b];
58 } else {
59 bitmap = bitmap >> 1;
60 }
61 }
62 }
63
64 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
65 * srt field in the info table. That's ok, because we'll
66 * never dereference it.
67 */
68 STATIC_INLINE void
69 scavenge_srt (StgClosure **srt, nat srt_bitmap)
70 {
71 nat bitmap;
72 StgClosure **p;
73
74 bitmap = srt_bitmap;
75 p = srt;
76
77 if (bitmap == (StgHalfWord)(-1)) {
78 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
79 return;
80 }
81
82 while (bitmap != 0) {
83 if ((bitmap & 1) != 0) {
84 #if defined(__PIC__) && defined(mingw32_TARGET_OS)
85 // Special-case to handle references to closures hiding out in DLLs, since
86 // double indirections are required to get at those. The code generator knows
87 // which is which when generating the SRT, so it stores the (indirect)
88 // reference to the DLL closure in the table by first adding one to it.
89 // We check for this here, and undo the addition before evacuating it.
90 //
91 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
92 // closure that's fixed at link-time, and no extra magic is required.
93 if ( (unsigned long)(*srt) & 0x1 ) {
94 evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
95 } else {
96 evacuate(p);
97 }
98 #else
99 evacuate(p);
100 #endif
101 }
102 p++;
103 bitmap = bitmap >> 1;
104 }
105 }
106
107
108 STATIC_INLINE void
109 scavenge_thunk_srt(const StgInfoTable *info)
110 {
111 StgThunkInfoTable *thunk_info;
112
113 if (!major_gc) return;
114
115 thunk_info = itbl_to_thunk_itbl(info);
116 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
117 }
118
119 STATIC_INLINE void
120 scavenge_fun_srt(const StgInfoTable *info)
121 {
122 StgFunInfoTable *fun_info;
123
124 if (!major_gc) return;
125
126 fun_info = itbl_to_fun_itbl(info);
127 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
128 }
129
130 /* -----------------------------------------------------------------------------
131 Scavenge a TSO.
132 -------------------------------------------------------------------------- */
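/* Note: on exit the TSO's dirty flag records whether it still holds
 * pointers into a younger generation; if any evacuation below fails to
 * promote (gct->failed_to_evac), the TSO is left marked TSO_DIRTY so
 * that its stack is traversed again at the next GC.
 */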
133
134 static void
135 scavengeTSO (StgTSO *tso)
136 {
137 rtsBool saved_eager;
138
139 if (tso->what_next == ThreadRelocated) {
140 // the only way this can happen is if the old TSO was on the
141 // mutable list. We might have other links to this defunct
142 // TSO, so we must update its link field.
143 evacuate((StgClosure**)&tso->_link);
144 return;
145 }
146
147 saved_eager = gct->eager_promotion;
148 gct->eager_promotion = rtsFalse;
149
150 if ( tso->why_blocked == BlockedOnMVar
151 || tso->why_blocked == BlockedOnBlackHole
152 || tso->why_blocked == BlockedOnException
153 ) {
154 evacuate(&tso->block_info.closure);
155 }
156 evacuate((StgClosure **)&tso->blocked_exceptions);
157
158 // We don't always chase the link field: TSOs on the blackhole
159 // queue are not automatically alive, so the link field is a
160 // "weak" pointer in that case.
161 if (tso->why_blocked != BlockedOnBlackHole) {
162 evacuate((StgClosure **)&tso->link);
163 }
164
165 // scavenge the current transaction record
166 evacuate((StgClosure **)&tso->trec);
167
168 // scavenge this thread's stack
169 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
170
171 if (gct->failed_to_evac) {
172 tso->flags |= TSO_DIRTY;
173 } else {
174 tso->flags &= ~TSO_DIRTY;
175 }
176
177 gct->eager_promotion = saved_eager;
178 }
179
180 /* -----------------------------------------------------------------------------
181 Blocks of function args occur on the stack (at the top) and
182 in PAPs.
183 -------------------------------------------------------------------------- */
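/* In these argument bitmaps a 0 bit denotes a pointer, which must be
 * evacuated, and a 1 bit denotes a non-pointer word that is skipped
 * (the reverse of the SRT bitmap convention above).
 */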
184
185 STATIC_INLINE StgPtr
186 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
187 {
188 StgPtr p;
189 StgWord bitmap;
190 nat size;
191
192 p = (StgPtr)args;
193 switch (fun_info->f.fun_type) {
194 case ARG_GEN:
195 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
196 size = BITMAP_SIZE(fun_info->f.b.bitmap);
197 goto small_bitmap;
198 case ARG_GEN_BIG:
199 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
200 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
201 p += size;
202 break;
203 default:
204 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
205 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
206 small_bitmap:
207 while (size > 0) {
208 if ((bitmap & 1) == 0) {
209 evacuate((StgClosure **)p);
210 }
211 p++;
212 bitmap = bitmap >> 1;
213 size--;
214 }
215 break;
216 }
217 return p;
218 }
219
220 STATIC_INLINE StgPtr
221 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
222 {
223 StgPtr p;
224 StgWord bitmap;
225 StgFunInfoTable *fun_info;
226
227 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
228 ASSERT(fun_info->i.type != PAP);
229 p = (StgPtr)payload;
230
231 switch (fun_info->f.fun_type) {
232 case ARG_GEN:
233 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
234 goto small_bitmap;
235 case ARG_GEN_BIG:
236 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
237 p += size;
238 break;
239 case ARG_BCO:
240 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
241 p += size;
242 break;
243 default:
244 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
245 small_bitmap:
246 while (size > 0) {
247 if ((bitmap & 1) == 0) {
248 evacuate((StgClosure **)p);
249 }
250 p++;
251 bitmap = bitmap >> 1;
252 size--;
253 }
254 break;
255 }
256 return p;
257 }
258
259 STATIC_INLINE StgPtr
260 scavenge_PAP (StgPAP *pap)
261 {
262 evacuate(&pap->fun);
263 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
264 }
265
266 STATIC_INLINE StgPtr
267 scavenge_AP (StgAP *ap)
268 {
269 evacuate(&ap->fun);
270 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
271 }
272
273 /* -----------------------------------------------------------------------------
274 Scavenge everything on the mark stack.
275
276 This is slightly different from scavenge():
277 - we don't walk linearly through the objects, so the scavenger
278 doesn't need to advance the pointer on to the next object.
279 -------------------------------------------------------------------------- */
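/* Rough sketch of the control flow: pop closures off the mark stack and
 * scavenge them until the stack is empty; if the mark stack overflowed
 * at some point, fall back to a linear scan of the compacted
 * generation's marked-but-not-yet-scavenged closures, pushing each one
 * found back onto the mark stack.
 */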
280
281 static void
282 scavenge_mark_stack(void)
283 {
284 StgPtr p, q;
285 StgInfoTable *info;
286 step *saved_evac_step;
287
288 gct->evac_step = &oldest_gen->steps[0];
289 saved_evac_step = gct->evac_step;
290
291 linear_scan:
292 while (!mark_stack_empty()) {
293 p = pop_mark_stack();
294
295 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
296 info = get_itbl((StgClosure *)p);
297
298 q = p;
299 switch (((volatile StgWord *)info)[1] & 0xffff) {
300
301 case MVAR_CLEAN:
302 case MVAR_DIRTY:
303 {
304 rtsBool saved_eager_promotion = gct->eager_promotion;
305
306 StgMVar *mvar = ((StgMVar *)p);
307 gct->eager_promotion = rtsFalse;
308 evacuate((StgClosure **)&mvar->head);
309 evacuate((StgClosure **)&mvar->tail);
310 evacuate((StgClosure **)&mvar->value);
311 gct->eager_promotion = saved_eager_promotion;
312
313 if (gct->failed_to_evac) {
314 mvar->header.info = &stg_MVAR_DIRTY_info;
315 } else {
316 mvar->header.info = &stg_MVAR_CLEAN_info;
317 }
318 break;
319 }
320
321 case FUN_2_0:
322 scavenge_fun_srt(info);
323 evacuate(&((StgClosure *)p)->payload[1]);
324 evacuate(&((StgClosure *)p)->payload[0]);
325 break;
326
327 case THUNK_2_0:
328 scavenge_thunk_srt(info);
329 evacuate(&((StgThunk *)p)->payload[1]);
330 evacuate(&((StgThunk *)p)->payload[0]);
331 break;
332
333 case CONSTR_2_0:
334 evacuate(&((StgClosure *)p)->payload[1]);
335 evacuate(&((StgClosure *)p)->payload[0]);
336 break;
337
338 case FUN_1_0:
339 case FUN_1_1:
340 scavenge_fun_srt(info);
341 evacuate(&((StgClosure *)p)->payload[0]);
342 break;
343
344 case THUNK_1_0:
345 case THUNK_1_1:
346 scavenge_thunk_srt(info);
347 evacuate(&((StgThunk *)p)->payload[0]);
348 break;
349
350 case CONSTR_1_0:
351 case CONSTR_1_1:
352 evacuate(&((StgClosure *)p)->payload[0]);
353 break;
354
355 case FUN_0_1:
356 case FUN_0_2:
357 scavenge_fun_srt(info);
358 break;
359
360 case THUNK_0_1:
361 case THUNK_0_2:
362 scavenge_thunk_srt(info);
363 break;
364
365 case CONSTR_0_1:
366 case CONSTR_0_2:
367 break;
368
369 case FUN:
370 scavenge_fun_srt(info);
371 goto gen_obj;
372
373 case THUNK:
374 {
375 StgPtr end;
376
377 scavenge_thunk_srt(info);
378 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
379 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
380 evacuate((StgClosure **)p);
381 }
382 break;
383 }
384
385 gen_obj:
386 case CONSTR:
387 case WEAK:
388 case STABLE_NAME:
389 {
390 StgPtr end;
391
392 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
393 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
394 evacuate((StgClosure **)p);
395 }
396 break;
397 }
398
399 case BCO: {
400 StgBCO *bco = (StgBCO *)p;
401 evacuate((StgClosure **)&bco->instrs);
402 evacuate((StgClosure **)&bco->literals);
403 evacuate((StgClosure **)&bco->ptrs);
404 break;
405 }
406
407 case IND_PERM:
408 // don't need to do anything here: the only possible case
409 // is that we're in a 1-space compacting collector, with
410 // no "old" generation.
411 break;
412
413 case IND_OLDGEN:
414 case IND_OLDGEN_PERM:
415 evacuate(&((StgInd *)p)->indirectee);
416 break;
417
418 case MUT_VAR_CLEAN:
419 case MUT_VAR_DIRTY: {
420 rtsBool saved_eager_promotion = gct->eager_promotion;
421
422 gct->eager_promotion = rtsFalse;
423 evacuate(&((StgMutVar *)p)->var);
424 gct->eager_promotion = saved_eager_promotion;
425
426 if (gct->failed_to_evac) {
427 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
428 } else {
429 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
430 }
431 break;
432 }
433
434 case CAF_BLACKHOLE:
435 case SE_CAF_BLACKHOLE:
436 case SE_BLACKHOLE:
437 case BLACKHOLE:
438 case ARR_WORDS:
439 break;
440
441 case THUNK_SELECTOR:
442 {
443 StgSelector *s = (StgSelector *)p;
444 evacuate(&s->selectee);
445 break;
446 }
447
448 // A chunk of stack saved in a heap object
449 case AP_STACK:
450 {
451 StgAP_STACK *ap = (StgAP_STACK *)p;
452
453 evacuate(&ap->fun);
454 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
455 break;
456 }
457
458 case PAP:
459 scavenge_PAP((StgPAP *)p);
460 break;
461
462 case AP:
463 scavenge_AP((StgAP *)p);
464 break;
465
466 case MUT_ARR_PTRS_CLEAN:
467 case MUT_ARR_PTRS_DIRTY:
468 // follow everything
469 {
470 StgPtr next;
471 rtsBool saved_eager;
472
473 // We don't eagerly promote objects pointed to by a mutable
474 // array, but if we find the array only points to objects in
475 // the same or an older generation, we mark it "clean" and
476 // avoid traversing it during minor GCs.
477 saved_eager = gct->eager_promotion;
478 gct->eager_promotion = rtsFalse;
479 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
480 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
481 evacuate((StgClosure **)p);
482 }
483 gct->eager_promotion = saved_eager;
484
485 if (gct->failed_to_evac) {
486 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
487 } else {
488 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
489 }
490
491 gct->failed_to_evac = rtsTrue; // mutable anyhow.
492 break;
493 }
494
495 case MUT_ARR_PTRS_FROZEN:
496 case MUT_ARR_PTRS_FROZEN0:
497 // follow everything
498 {
499 StgPtr next, q = p;
500
501 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
502 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
503 evacuate((StgClosure **)p);
504 }
505
506 // If we're going to put this object on the mutable list, then
507 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
508 if (gct->failed_to_evac) {
509 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
510 } else {
511 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
512 }
513 break;
514 }
515
516 case TSO:
517 {
518 scavengeTSO((StgTSO*)p);
519 gct->failed_to_evac = rtsTrue; // always on the mutable list
520 break;
521 }
522
523 case TVAR_WATCH_QUEUE:
524 {
525 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
526 gct->evac_step = 0;
527 evacuate((StgClosure **)&wq->closure);
528 evacuate((StgClosure **)&wq->next_queue_entry);
529 evacuate((StgClosure **)&wq->prev_queue_entry);
530 gct->evac_step = saved_evac_step;
531 gct->failed_to_evac = rtsTrue; // mutable
532 break;
533 }
534
535 case TVAR:
536 {
537 StgTVar *tvar = ((StgTVar *) p);
538 gct->evac_step = 0;
539 evacuate((StgClosure **)&tvar->current_value);
540 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
541 gct->evac_step = saved_evac_step;
542 gct->failed_to_evac = rtsTrue; // mutable
543 break;
544 }
545
546 case TREC_CHUNK:
547 {
548 StgWord i;
549 StgTRecChunk *tc = ((StgTRecChunk *) p);
550 TRecEntry *e = &(tc -> entries[0]);
551 gct->evac_step = 0;
552 evacuate((StgClosure **)&tc->prev_chunk);
553 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
554 evacuate((StgClosure **)&e->tvar);
555 evacuate((StgClosure **)&e->expected_value);
556 evacuate((StgClosure **)&e->new_value);
557 }
558 gct->evac_step = saved_evac_step;
559 gct->failed_to_evac = rtsTrue; // mutable
560 break;
561 }
562
563 case TREC_HEADER:
564 {
565 StgTRecHeader *trec = ((StgTRecHeader *) p);
566 gct->evac_step = 0;
567 evacuate((StgClosure **)&trec->enclosing_trec);
568 evacuate((StgClosure **)&trec->current_chunk);
569 evacuate((StgClosure **)&trec->invariants_to_check);
570 gct->evac_step = saved_evac_step;
571 gct->failed_to_evac = rtsTrue; // mutable
572 break;
573 }
574
575 case ATOMIC_INVARIANT:
576 {
577 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
578 gct->evac_step = 0;
579 evacuate(&invariant->code);
580 evacuate((StgClosure **)&invariant->last_execution);
581 gct->evac_step = saved_evac_step;
582 gct->failed_to_evac = rtsTrue; // mutable
583 break;
584 }
585
586 case INVARIANT_CHECK_QUEUE:
587 {
588 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
589 gct->evac_step = 0;
590 evacuate((StgClosure **)&queue->invariant);
591 evacuate((StgClosure **)&queue->my_execution);
592 evacuate((StgClosure **)&queue->next_queue_entry);
593 gct->evac_step = saved_evac_step;
594 gct->failed_to_evac = rtsTrue; // mutable
595 break;
596 }
597
598 default:
599 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
600 info->type, p);
601 }
602
603 if (gct->failed_to_evac) {
604 gct->failed_to_evac = rtsFalse;
605 if (gct->evac_step) {
606 recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
607 }
608 }
609
610 // mark the next bit to indicate "scavenged"
611 mark(q+1, Bdescr(q));
612
613 } // while (!mark_stack_empty())
614
615 // start a new linear scan if the mark stack overflowed at some point
616 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
617 debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
618 mark_stack_overflowed = rtsFalse;
619 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
620 oldgen_scan = oldgen_scan_bd->start;
621 }
622
623 if (oldgen_scan_bd) {
624 // push a new thing on the mark stack
625 loop:
626 // find a closure that is marked but not scavenged, and start
627 // from there.
628 while (oldgen_scan < oldgen_scan_bd->free
629 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
630 oldgen_scan++;
631 }
632
633 if (oldgen_scan < oldgen_scan_bd->free) {
634
635 // already scavenged?
636 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
637 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
638 goto loop;
639 }
640 push_mark_stack(oldgen_scan);
641 // ToDo: bump the linear scan by the actual size of the object
642 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
643 goto linear_scan;
644 }
645
646 oldgen_scan_bd = oldgen_scan_bd->link;
647 if (oldgen_scan_bd != NULL) {
648 oldgen_scan = oldgen_scan_bd->start;
649 goto loop;
650 }
651 }
652 }
653
654 /* -----------------------------------------------------------------------------
655 Scavenge one object.
656
657 This is used for objects that are temporarily marked as mutable
658 because they contain old-to-new generation pointers. Only certain
659 objects can have this property.
660 -------------------------------------------------------------------------- */
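/* Returns rtsTrue if some pointer in the object could not be promoted
 * out of the generation(s) being collected (gct->failed_to_evac), in
 * which case the caller must keep the object on the mutable list.
 */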
661
662 static rtsBool
663 scavenge_one(StgPtr p)
664 {
665 const StgInfoTable *info;
666 step *saved_evac_step = gct->evac_step;
667 rtsBool no_luck;
668
669 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
670 info = get_itbl((StgClosure *)p);
671
672 switch (info->type) {
673
674 case MVAR_CLEAN:
675 case MVAR_DIRTY:
676 {
677 rtsBool saved_eager_promotion = gct->eager_promotion;
678
679 StgMVar *mvar = ((StgMVar *)p);
680 gct->eager_promotion = rtsFalse;
681 evacuate((StgClosure **)&mvar->head);
682 evacuate((StgClosure **)&mvar->tail);
683 evacuate((StgClosure **)&mvar->value);
684 gct->eager_promotion = saved_eager_promotion;
685
686 if (gct->failed_to_evac) {
687 mvar->header.info = &stg_MVAR_DIRTY_info;
688 } else {
689 mvar->header.info = &stg_MVAR_CLEAN_info;
690 }
691 break;
692 }
693
694 case THUNK:
695 case THUNK_1_0:
696 case THUNK_0_1:
697 case THUNK_1_1:
698 case THUNK_0_2:
699 case THUNK_2_0:
700 {
701 StgPtr q, end;
702
703 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
704 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
705 evacuate((StgClosure **)q);
706 }
707 break;
708 }
709
710 case FUN:
711 case FUN_1_0: // hardly worth specialising these guys
712 case FUN_0_1:
713 case FUN_1_1:
714 case FUN_0_2:
715 case FUN_2_0:
716 case CONSTR:
717 case CONSTR_1_0:
718 case CONSTR_0_1:
719 case CONSTR_1_1:
720 case CONSTR_0_2:
721 case CONSTR_2_0:
722 case WEAK:
723 case IND_PERM:
724 {
725 StgPtr q, end;
726
727 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
728 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
729 evacuate((StgClosure **)q);
730 }
731 break;
732 }
733
734 case MUT_VAR_CLEAN:
735 case MUT_VAR_DIRTY: {
736 StgPtr q = p;
737 rtsBool saved_eager_promotion = gct->eager_promotion;
738
739 gct->eager_promotion = rtsFalse;
740 evacuate(&((StgMutVar *)p)->var);
741 gct->eager_promotion = saved_eager_promotion;
742
743 if (gct->failed_to_evac) {
744 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
745 } else {
746 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
747 }
748 break;
749 }
750
751 case CAF_BLACKHOLE:
752 case SE_CAF_BLACKHOLE:
753 case SE_BLACKHOLE:
754 case BLACKHOLE:
755 break;
756
757 case THUNK_SELECTOR:
758 {
759 StgSelector *s = (StgSelector *)p;
760 evacuate(&s->selectee);
761 break;
762 }
763
764 case AP_STACK:
765 {
766 StgAP_STACK *ap = (StgAP_STACK *)p;
767
768 evacuate(&ap->fun);
769 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
770 p = (StgPtr)ap->payload + ap->size;
771 break;
772 }
773
774 case PAP:
775 p = scavenge_PAP((StgPAP *)p);
776 break;
777
778 case AP:
779 p = scavenge_AP((StgAP *)p);
780 break;
781
782 case ARR_WORDS:
783 // nothing to follow
784 break;
785
786 case MUT_ARR_PTRS_CLEAN:
787 case MUT_ARR_PTRS_DIRTY:
788 {
789 StgPtr next, q;
790 rtsBool saved_eager;
791
792 // We don't eagerly promote objects pointed to by a mutable
793 // array, but if we find the array only points to objects in
794 // the same or an older generation, we mark it "clean" and
795 // avoid traversing it during minor GCs.
796 saved_eager = gct->eager_promotion;
797 gct->eager_promotion = rtsFalse;
798 q = p;
799 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
800 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
801 evacuate((StgClosure **)p);
802 }
803 gct->eager_promotion = saved_eager;
804
805 if (gct->failed_to_evac) {
806 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
807 } else {
808 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
809 }
810
811 gct->failed_to_evac = rtsTrue;
812 break;
813 }
814
815 case MUT_ARR_PTRS_FROZEN:
816 case MUT_ARR_PTRS_FROZEN0:
817 {
818 // follow everything
819 StgPtr next, q=p;
820
821 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
822 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
823 evacuate((StgClosure **)p);
824 }
825
826 // If we're going to put this object on the mutable list, then
827 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
828 if (gct->failed_to_evac) {
829 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
830 } else {
831 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
832 }
833 break;
834 }
835
836 case TSO:
837 {
838 scavengeTSO((StgTSO*)p);
839 gct->failed_to_evac = rtsTrue; // always on the mutable list
840 break;
841 }
842
843 case TVAR_WATCH_QUEUE:
844 {
845 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
846 gct->evac_step = 0;
847 evacuate((StgClosure **)&wq->closure);
848 evacuate((StgClosure **)&wq->next_queue_entry);
849 evacuate((StgClosure **)&wq->prev_queue_entry);
850 gct->evac_step = saved_evac_step;
851 gct->failed_to_evac = rtsTrue; // mutable
852 break;
853 }
854
855 case TVAR:
856 {
857 StgTVar *tvar = ((StgTVar *) p);
858 gct->evac_step = 0;
859 evacuate((StgClosure **)&tvar->current_value);
860 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
861 gct->evac_step = saved_evac_step;
862 gct->failed_to_evac = rtsTrue; // mutable
863 break;
864 }
865
866 case TREC_HEADER:
867 {
868 StgTRecHeader *trec = ((StgTRecHeader *) p);
869 gct->evac_step = 0;
870 evacuate((StgClosure **)&trec->enclosing_trec);
871 evacuate((StgClosure **)&trec->current_chunk);
872 evacuate((StgClosure **)&trec->invariants_to_check);
873 gct->evac_step = saved_evac_step;
874 gct->failed_to_evac = rtsTrue; // mutable
875 break;
876 }
877
878 case TREC_CHUNK:
879 {
880 StgWord i;
881 StgTRecChunk *tc = ((StgTRecChunk *) p);
882 TRecEntry *e = &(tc -> entries[0]);
883 gct->evac_step = 0;
884 evacuate((StgClosure **)&tc->prev_chunk);
885 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
886 evacuate((StgClosure **)&e->tvar);
887 evacuate((StgClosure **)&e->expected_value);
888 evacuate((StgClosure **)&e->new_value);
889 }
890 gct->evac_step = saved_evac_step;
891 gct->failed_to_evac = rtsTrue; // mutable
892 break;
893 }
894
895 case ATOMIC_INVARIANT:
896 {
897 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
898 gct->evac_step = 0;
899 evacuate(&invariant->code);
900 evacuate((StgClosure **)&invariant->last_execution);
901 gct->evac_step = saved_evac_step;
902 gct->failed_to_evac = rtsTrue; // mutable
903 break;
904 }
905
906 case INVARIANT_CHECK_QUEUE:
907 {
908 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
909 gct->evac_step = 0;
910 evacuate((StgClosure **)&queue->invariant);
911 evacuate((StgClosure **)&queue->my_execution);
912 evacuate((StgClosure **)&queue->next_queue_entry);
913 gct->evac_step = saved_evac_step;
914 gct->failed_to_evac = rtsTrue; // mutable
915 break;
916 }
917
918 case IND_OLDGEN:
919 case IND_OLDGEN_PERM:
920 case IND_STATIC:
921 {
922 /* Careful here: a THUNK can be on the mutable list because
923 * it contains pointers to young gen objects. If such a thunk
924 * is updated, the IND_OLDGEN will be added to the mutable
925 * list again, and we'll scavenge it twice. evacuate()
926 * doesn't check whether the object has already been
927 * evacuated, so we perform that check here.
928 */
929 StgClosure *q = ((StgInd *)p)->indirectee;
930 if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
931 break;
932 }
933 evacuate(&((StgInd *)p)->indirectee);
934 }
935
936 #if 0 && defined(DEBUG)
937 if (RtsFlags.DebugFlags.gc)
938 /* Debugging code to print out the size of the thing we just
939 * promoted
940 */
941 {
942 StgPtr start = gen->steps[0].scan;
943 bdescr *start_bd = gen->steps[0].scan_bd;
944 nat size = 0;
945 scavenge(&gen->steps[0]);
946 if (start_bd != gen->steps[0].scan_bd) {
947 size += (P_)BLOCK_ROUND_UP(start) - start;
948 start_bd = start_bd->link;
949 while (start_bd != gen->steps[0].scan_bd) {
950 size += BLOCK_SIZE_W;
951 start_bd = start_bd->link;
952 }
953 size += gen->steps[0].scan -
954 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
955 } else {
956 size = gen->steps[0].scan - start;
957 }
958 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
959 }
960 #endif
961 break;
962
963 default:
964 barf("scavenge_one: strange object %d", (int)(info->type));
965 }
966
967 no_luck = gct->failed_to_evac;
968 gct->failed_to_evac = rtsFalse;
969 return (no_luck);
970 }
971
972 /* -----------------------------------------------------------------------------
973 Scavenging mutable lists.
974
975 We treat the mutable list of each generation > N (i.e. all the
976 generations older than the one being collected) as roots. We also
977 remove non-mutable objects from the mutable list at this point.
978 -------------------------------------------------------------------------- */
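/* Each block on a mutable list is simply an array of closure pointers
 * filling the words between bd->start and bd->free. Objects known to be
 * clean are re-recorded without a full scavenge; the rest are scavenged
 * with scavenge_one() and put back on the list only if they still point
 * into a younger generation.
 */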
979
980 void
981 scavenge_mutable_list(generation *gen)
982 {
983 bdescr *bd;
984 StgPtr p, q;
985
986 bd = gen->saved_mut_list;
987
988 gct->evac_step = &gen->steps[0];
989 for (; bd != NULL; bd = bd->link) {
990 for (q = bd->start; q < bd->free; q++) {
991 p = (StgPtr)*q;
992 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
993
994 #ifdef DEBUG
995 switch (get_itbl((StgClosure *)p)->type) {
996 case MUT_VAR_CLEAN:
997 barf("MUT_VAR_CLEAN on mutable list");
998 case MUT_VAR_DIRTY:
999 mutlist_MUTVARS++; break;
1000 case MUT_ARR_PTRS_CLEAN:
1001 case MUT_ARR_PTRS_DIRTY:
1002 case MUT_ARR_PTRS_FROZEN:
1003 case MUT_ARR_PTRS_FROZEN0:
1004 mutlist_MUTARRS++; break;
1005 case MVAR_CLEAN:
1006 barf("MVAR_CLEAN on mutable list");
1007 case MVAR_DIRTY:
1008 mutlist_MVARS++; break;
1009 default:
1010 mutlist_OTHERS++; break;
1011 }
1012 #endif
1013
1014 // Check whether this object is "clean", that is it
1015 // definitely doesn't point into a young generation.
1016 // Clean objects don't need to be scavenged. Some clean
1017 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1018 // list at all; others, such as MUT_ARR_PTRS_CLEAN and
1019 // TSO, are always on the mutable list.
1020 //
1021 switch (get_itbl((StgClosure *)p)->type) {
1022 case MUT_ARR_PTRS_CLEAN:
1023 recordMutableGen_GC((StgClosure *)p,gen);
1024 continue;
1025 case TSO: {
1026 StgTSO *tso = (StgTSO *)p;
1027 if ((tso->flags & TSO_DIRTY) == 0) {
1028 // A clean TSO: we don't have to traverse its
1029 // stack. However, we *do* follow the link field:
1030 // we don't want to have to mark a TSO dirty just
1031 // because we put it on a different queue.
1032 if (tso->why_blocked != BlockedOnBlackHole) {
1033 evacuate((StgClosure **)&tso->link);
1034 }
1035 recordMutableGen_GC((StgClosure *)p,gen);
1036 continue;
1037 }
1038 }
1039 default:
1040 ;
1041 }
1042
1043 if (scavenge_one(p)) {
1044 // didn't manage to promote everything, so put the
1045 // object back on the list.
1046 recordMutableGen_GC((StgClosure *)p,gen);
1047 }
1048 }
1049 }
1050
1051 // free the old mut_list
1052 freeChain_sync(gen->saved_mut_list);
1053 gen->saved_mut_list = NULL;
1054 }
1055
1056 /* -----------------------------------------------------------------------------
1057 Scavenging the static objects.
1058
1059 Static objects encountered during evacuation are linked onto the
1060 static_objects list; we scavenge each one, evacuating the pointers it
1061 contains, and move it onto the scavenged_static_objects list as we go.
1062 -------------------------------------------------------------------------- */
1063
1064 static void
1065 scavenge_static(void)
1066 {
1067 StgClosure* p;
1068 const StgInfoTable *info;
1069
1070 debugTrace(DEBUG_gc, "scavenging static objects");
1071
1072 /* Always evacuate straight to the oldest generation for static
1073 * objects */
1074 gct->evac_step = &oldest_gen->steps[0];
1075
1076 /* keep going until we've scavenged all the objects on the linked
1077 list... */
1078
1079 while (1) {
1080
1081 /* get the next static object from the list. Remember, there might
1082 * be more stuff on this list after each evacuation...
1083 * (evacuate() may add more objects to gct->static_objects)
1084 */
1085 p = gct->static_objects;
1086 if (p == END_OF_STATIC_LIST) {
1087 break;
1088 }
1089
1090 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1091 info = get_itbl(p);
1092 /*
1093 if (info->type==RBH)
1094 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1095 */
1096 // make sure the info pointer is into text space
1097
1098 /* Take this object *off* the static_objects list,
1099 * and put it on the scavenged_static_objects list.
1100 */
1101 gct->static_objects = *STATIC_LINK(info,p);
1102 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1103 gct->scavenged_static_objects = p;
1104
1105 switch (info -> type) {
1106
1107 case IND_STATIC:
1108 {
1109 StgInd *ind = (StgInd *)p;
1110 evacuate(&ind->indirectee);
1111
1112 /* might fail to evacuate it, in which case we have to pop it
1113 * back on the mutable list of the oldest generation. We
1114 * leave it *on* the scavenged_static_objects list, though,
1115 * in case we visit this object again.
1116 */
1117 if (gct->failed_to_evac) {
1118 gct->failed_to_evac = rtsFalse;
1119 recordMutableGen_GC((StgClosure *)p,oldest_gen);
1120 }
1121 break;
1122 }
1123
1124 case THUNK_STATIC:
1125 scavenge_thunk_srt(info);
1126 break;
1127
1128 case FUN_STATIC:
1129 scavenge_fun_srt(info);
1130 break;
1131
1132 case CONSTR_STATIC:
1133 {
1134 StgPtr q, next;
1135
1136 next = (P_)p->payload + info->layout.payload.ptrs;
1137 // evacuate the pointers
1138 for (q = (P_)p->payload; q < next; q++) {
1139 evacuate((StgClosure **)q);
1140 }
1141 break;
1142 }
1143
1144 default:
1145 barf("scavenge_static: strange closure %d", (int)(info->type));
1146 }
1147
1148 ASSERT(gct->failed_to_evac == rtsFalse);
1149 }
1150 }
1151
1152 /* -----------------------------------------------------------------------------
1153 scavenge a chunk of memory described by a bitmap
1154 -------------------------------------------------------------------------- */
1155
1156 static void
1157 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
1158 {
1159 nat i, b;
1160 StgWord bitmap;
1161
1162 b = 0;
1163 bitmap = large_bitmap->bitmap[b];
1164 for (i = 0; i < size; ) {
1165 if ((bitmap & 1) == 0) {
1166 evacuate((StgClosure **)p);
1167 }
1168 i++;
1169 p++;
1170 if (i % BITS_IN(W_) == 0) {
1171 b++;
1172 bitmap = large_bitmap->bitmap[b];
1173 } else {
1174 bitmap = bitmap >> 1;
1175 }
1176 }
1177 }
1178
1179 STATIC_INLINE StgPtr
1180 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
1181 {
1182 while (size > 0) {
1183 if ((bitmap & 1) == 0) {
1184 evacuate((StgClosure **)p);
1185 }
1186 p++;
1187 bitmap = bitmap >> 1;
1188 size--;
1189 }
1190 return p;
1191 }
1192
1193 /* -----------------------------------------------------------------------------
1194 scavenge_stack walks over a section of stack and evacuates all the
1195 objects pointed to by it. We can use the same code for walking
1196 AP_STACK_UPDs, since these are just sections of copied stack.
1197 -------------------------------------------------------------------------- */
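/* Each activation record starts with an info pointer; the words that
 * follow are described either by a small bitmap held in the info table
 * (RET_SMALL and the frame types below), a large bitmap (RET_BIG,
 * RET_BCO), a liveness mask stored in the frame itself (RET_DYN), or
 * the called function's argument bitmap (RET_FUN).
 */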
1198
1199 static void
1200 scavenge_stack(StgPtr p, StgPtr stack_end)
1201 {
1202 const StgRetInfoTable* info;
1203 StgWord bitmap;
1204 nat size;
1205
1206 /*
1207 * Each time around this loop, we are looking at a chunk of stack
1208 * that starts with an activation record.
1209 */
1210
1211 while (p < stack_end) {
1212 info = get_ret_itbl((StgClosure *)p);
1213
1214 switch (info->i.type) {
1215
1216 case UPDATE_FRAME:
1217 // In SMP, we can get update frames that point to indirections
1218 // when two threads evaluate the same thunk. We do attempt to
1219 // discover this situation in threadPaused(), but it's
1220 // possible that the following sequence occurs:
1221 //
1222 // A B
1223 // enter T
1224 // enter T
1225 // blackhole T
1226 // update T
1227 // GC
1228 //
1229 // Now T is an indirection, and the update frame is already
1230 // marked on A's stack, so we won't traverse it again in
1231 // threadPaused(). We could traverse the whole stack again
1232 // before GC, but that seems like overkill.
1233 //
1234 // Scavenging this update frame as normal would be disastrous;
1235 // the updatee would end up pointing to the value. So we turn
1236 // the indirection into an IND_PERM, so that evacuate will
1237 // copy the indirection into the old generation instead of
1238 // discarding it.
1239 {
1240 nat type;
1241 type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
1242 if (type == IND) {
1243 ((StgUpdateFrame *)p)->updatee->header.info =
1244 (StgInfoTable *)&stg_IND_PERM_info;
1245 } else if (type == IND_OLDGEN) {
1246 ((StgUpdateFrame *)p)->updatee->header.info =
1247 (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
1248 }
1249 evacuate(&((StgUpdateFrame *)p)->updatee);
1250 p += sizeofW(StgUpdateFrame);
1251 continue;
1252 }
1253
1254 // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
1255 case CATCH_STM_FRAME:
1256 case CATCH_RETRY_FRAME:
1257 case ATOMICALLY_FRAME:
1258 case STOP_FRAME:
1259 case CATCH_FRAME:
1260 case RET_SMALL:
1261 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1262 size = BITMAP_SIZE(info->i.layout.bitmap);
1263 // NOTE: the payload starts immediately after the info-ptr; we
1264 // don't have an StgHeader in the same sense as a heap closure.
1265 p++;
1266 p = scavenge_small_bitmap(p, size, bitmap);
1267
1268 follow_srt:
1269 if (major_gc)
1270 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
1271 continue;
1272
1273 case RET_BCO: {
1274 StgBCO *bco;
1275 nat size;
1276
1277 p++;
1278 evacuate((StgClosure **)p);
1279 bco = (StgBCO *)*p;
1280 p++;
1281 size = BCO_BITMAP_SIZE(bco);
1282 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1283 p += size;
1284 continue;
1285 }
1286
1287 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1288 case RET_BIG:
1289 {
1290 nat size;
1291
1292 size = GET_LARGE_BITMAP(&info->i)->size;
1293 p++;
1294 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1295 p += size;
1296 // and don't forget to follow the SRT
1297 goto follow_srt;
1298 }
1299
1300 // Dynamic bitmap: the mask is stored on the stack, and
1301 // there are a number of non-pointers followed by a number
1302 // of pointers above the bitmapped area. (see StgMacros.h,
1303 // HEAP_CHK_GEN).
1304 case RET_DYN:
1305 {
1306 StgWord dyn;
1307 dyn = ((StgRetDyn *)p)->liveness;
1308
1309 // traverse the bitmap first
1310 bitmap = RET_DYN_LIVENESS(dyn);
1311 p = (P_)&((StgRetDyn *)p)->payload[0];
1312 size = RET_DYN_BITMAP_SIZE;
1313 p = scavenge_small_bitmap(p, size, bitmap);
1314
1315 // skip over the non-ptr words
1316 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
1317
1318 // follow the ptr words
1319 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
1320 evacuate((StgClosure **)p);
1321 p++;
1322 }
1323 continue;
1324 }
1325
1326 case RET_FUN:
1327 {
1328 StgRetFun *ret_fun = (StgRetFun *)p;
1329 StgFunInfoTable *fun_info;
1330
1331 evacuate(&ret_fun->fun);
1332 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1333 p = scavenge_arg_block(fun_info, ret_fun->payload);
1334 goto follow_srt;
1335 }
1336
1337 default:
1338 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1339 }
1340 }
1341 }
1342
1343 /* -----------------------------------------------------------------------------
1344 scavenge the large object list.
1345
1346 evac_step set by caller; similar games played with evac_step as with
1347 scavenge() - see comment at the top of scavenge(). Most large
1348 objects are (repeatedly) mutable, so most of the time evac_step will
1349 be zero.
1350 --------------------------------------------------------------------------- */
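/* Each bdescr on ws->todo_large_objects covers a single large object
 * beginning at bd->start; the object is scavenged in place with
 * scavenge_one() and its block is moved onto the scavenged list.
 */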
1351
1352 static void
1353 scavenge_large (step_workspace *ws)
1354 {
1355 bdescr *bd;
1356 StgPtr p;
1357
1358 gct->evac_step = ws->step;
1359
1360 bd = ws->todo_large_objects;
1361
1362 for (; bd != NULL; bd = ws->todo_large_objects) {
1363
1364 // take this object *off* the large objects list and put it on
1365 // the scavenged large objects list. This is so that we can
1366 // treat new_large_objects as a stack and push new objects on
1367 // the front when evacuating.
1368 ws->todo_large_objects = bd->link;
1369
1370 ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
1371 dbl_link_onto(bd, &ws->step->scavenged_large_objects);
1372 ws->step->n_scavenged_large_blocks += bd->blocks;
1373 RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);
1374
1375 p = bd->start;
1376 if (scavenge_one(p)) {
1377 if (ws->step->gen_no > 0) {
1378 recordMutableGen_GC((StgClosure *)p, ws->step->gen);
1379 }
1380 }
1381
1382 // stats
1383 gct->scanned += closure_sizeW((StgClosure*)p);
1384 }
1385 }
1386
1387 /* ----------------------------------------------------------------------------
1388 Scavenge a block
1389 ------------------------------------------------------------------------- */
1390
1391 #define PARALLEL_GC
1392 #include "Scav.c-inc"
1393 #undef PARALLEL_GC
1394 #include "Scav.c-inc"
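// Scav.c-inc is included twice, once with PARALLEL_GC defined and once
// without, generating two variants of the block scavenger; callers below
// pick scavenge_block1() when there is a single GC thread and
// scavenge_block() otherwise. (Which inclusion produces which name is
// determined inside Scav.c-inc, not shown here.)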
1395
1396 /* ----------------------------------------------------------------------------
1397 Look for work to do.
1398
1399 We look for the oldest step that has either a todo block that can
1400 be scanned, or a block of work on the global queue that we can
1401 scan.
1402
1403 It is important to take work from the *oldest* generation that
1404 has work available, because that minimizes the likelihood of
1405 evacuating objects into a young generation when they should have
1406 been eagerly promoted. This really does make a difference (the
1407 cacheprof benchmark is one that is affected).
1408
1409 We also want to scan the todo block if possible before grabbing
1410 work from the global queue, the reason being that we don't want to
1411 steal work from the global queue and starve other threads if there
1412 is other work we can usefully be doing.
1413 ------------------------------------------------------------------------- */
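/* The loop below walks the workspaces from the oldest step downwards and
 * does at most one unit of work per pass (a partially-scanned todo block,
 * a list of large objects, or a block grabbed from the global queue),
 * then restarts from the oldest step so that older work is always
 * preferred.
 */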
1414
1415 static rtsBool
1416 scavenge_find_work (void)
1417 {
1418 int s;
1419 step_workspace *ws;
1420 rtsBool did_something, did_anything;
1421 bdescr *bd;
1422
1423 gct->scav_find_work++;
1424
1425 did_anything = rtsFalse;
1426
1427 loop:
1428 did_something = rtsFalse;
1429 for (s = total_steps-1; s >= 0; s--) {
1430 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1431 continue;
1432 }
1433 ws = &gct->steps[s];
1434
1435 gct->scan_bd = NULL;
1436
1437 // If we have a scan block with some work to do,
1438 // scavenge everything up to the free pointer.
1439 if (ws->todo_bd->u.scan < ws->todo_free)
1440 {
1441 if (n_gc_threads == 1) {
1442 scavenge_block1(ws->todo_bd);
1443 } else {
1444 scavenge_block(ws->todo_bd);
1445 }
1446 did_something = rtsTrue;
1447 break;
1448 }
1449
1450 // If we have any large objects to scavenge, do them now.
1451 if (ws->todo_large_objects) {
1452 scavenge_large(ws);
1453 did_something = rtsTrue;
1454 break;
1455 }
1456
1457 if ((bd = grab_todo_block(ws)) != NULL) {
1458 if (n_gc_threads == 1) {
1459 scavenge_block1(bd);
1460 } else {
1461 scavenge_block(bd);
1462 }
1463 did_something = rtsTrue;
1464 break;
1465 }
1466 }
1467
1468 if (did_something) {
1469 did_anything = rtsTrue;
1470 goto loop;
1471 }
1472 // only return when there is no more work to do
1473
1474 return did_anything;
1475 }
1476
1477 /* ----------------------------------------------------------------------------
1478 Scavenge until we can't find anything more to scavenge.
1479 ------------------------------------------------------------------------- */
1480
1481 void
1482 scavenge_loop(void)
1483 {
1484 rtsBool work_to_do;
1485
1486 loop:
1487 work_to_do = rtsFalse;
1488
1489 // scavenge static objects
1490 if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
1491 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
1492 scavenge_static();
1493 }
1494
1495 // scavenge objects in compacted generation
1496 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
1497 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
1498 scavenge_mark_stack();
1499 work_to_do = rtsTrue;
1500 }
1501
1502 // Order is important here: we want to deal in full blocks as
1503 // much as possible, so go for global work in preference to
1504 // local work. Only if all the global work has been exhausted
1505 // do we start scavenging the fragments of blocks in the local
1506 // workspaces.
1507 if (scavenge_find_work()) goto loop;
1508
1509 if (work_to_do) goto loop;
1510 }
1511
1512 rtsBool
1513 any_work (void)
1514 {
1515 int s;
1516 step_workspace *ws;
1517
1518 gct->any_work++;
1519
1520 write_barrier();
1521
1522 // scavenge objects in compacted generation
1523 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
1524 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
1525 return rtsTrue;
1526 }
1527
1528 // Check for global work in any step. We don't need to check for
1529 // local work, because we have already exited scavenge_loop(),
1530 // which means there is no local work for this thread.
1531 for (s = total_steps-1; s >= 0; s--) {
1532 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1533 continue;
1534 }
1535 ws = &gct->steps[s];
1536 if (ws->todo_large_objects) return rtsTrue;
1537 if (ws->step->todos) return rtsTrue;
1538 }
1539
1540 gct->no_work++;
1541
1542 return rtsFalse;
1543 }