ghc.git: rts/sm/Scav.c (commit b22f2442a175f8eb2f20f1811c034fafdec7a859)
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2006
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "Rts.h"
15 #include "RtsFlags.h"
16 #include "Storage.h"
17 #include "MBlock.h"
18 #include "GC.h"
19 #include "GCUtils.h"
20 #include "Compact.h"
21 #include "Evac.h"
22 #include "Scav.h"
23 #include "Apply.h"
24 #include "Trace.h"
25 #include "LdvProfile.h"
26 #include "Sanity.h"
27
28 static void scavenge_stack (StgPtr p, StgPtr stack_end);
29
30 static void scavenge_large_bitmap (StgPtr p,
31 StgLargeBitmap *large_bitmap,
32 nat size );
33
34
35 /* Similar to scavenge_large_bitmap(), but used for large SRTs, where a
36 * *set* bit marks a pointer entry (the opposite of the stack bitmap convention).
37 */
38 static void
39 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
40 {
41 nat i, b, size;
42 StgWord bitmap;
43 StgClosure **p;
44
45 b = 0;
46 bitmap = large_srt->l.bitmap[b];
47 size = (nat)large_srt->l.size;
48 p = (StgClosure **)large_srt->srt;
49 for (i = 0; i < size; ) {
50 if ((bitmap & 1) != 0) {
51 evacuate(p);
52 }
53 i++;
54 p++;
55 if (i % BITS_IN(W_) == 0) {
56 b++;
57 bitmap = large_srt->l.bitmap[b];
58 } else {
59 bitmap = bitmap >> 1;
60 }
61 }
62 }
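/* Editorial illustration (not part of the original file): the loop above
 * is equivalent to indexing the bitmap directly, i.e. SRT entry i is a
 * pointer iff bit (i % BITS_IN(W_)) of word (i / BITS_IN(W_)) is set; the
 * loop merely avoids the division by shifting the current bitmap word and
 * reloading a fresh word every BITS_IN(W_) entries.  The helper name
 * below is made up for illustration.
 */
#if 0   /* sketch only, never compiled */
static rtsBool
example_large_srt_entry_is_ptr (StgLargeSRT *srt, nat i)
{
    return (srt->l.bitmap[i / BITS_IN(W_)] >> (i % BITS_IN(W_))) & 1;
}
#endif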
63
64 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
65 * srt field in the info table. That's ok, because we'll
66 * never dereference it.
67 */
68 STATIC_INLINE void
69 scavenge_srt (StgClosure **srt, nat srt_bitmap)
70 {
71 nat bitmap;
72 StgClosure **p;
73
74 bitmap = srt_bitmap;
75 p = srt;
76
77 if (bitmap == (StgHalfWord)(-1)) {
78 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
79 return;
80 }
81
82 while (bitmap != 0) {
83 if ((bitmap & 1) != 0) {
84 #if defined(__PIC__) && defined(mingw32_TARGET_OS)
85 // Special-case to handle references to closures hiding out in DLLs, since
86 // double indirections are required to get at those. The code generator knows
87 // which is which when generating the SRT, so it stores the (indirect)
88 // reference to the DLL closure in the table by first adding one to it.
89 // We check for this here, and undo the addition before evacuating it.
90 //
91 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
92 // closure that's fixed at link-time, and no extra magic is required.
93 if ( (unsigned long)(*srt) & 0x1 ) {
94 evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
95 } else {
96 evacuate(p);
97 }
98 #else
99 evacuate(p);
100 #endif
101 }
102 p++;
103 bitmap = bitmap >> 1;
104 }
105 }
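// Editorial notes on the code above (not in the original source):
//  - a srt_bitmap equal to (StgHalfWord)(-1) is a sentinel meaning the
//    bitmap did not fit in a half word, so the SRT is really an
//    StgLargeSRT and is handled by scavenge_large_srt_bitmap();
//  - otherwise the bitmap is walked directly: e.g. with srt_bitmap = 0x5
//    (binary 101) entries 0 and 2 are evacuated and entry 1 is skipped.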
106
107
108 STATIC_INLINE void
109 scavenge_thunk_srt(const StgInfoTable *info)
110 {
111 StgThunkInfoTable *thunk_info;
112
113 if (!major_gc) return;
114
115 thunk_info = itbl_to_thunk_itbl(info);
116 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
117 }
118
119 STATIC_INLINE void
120 scavenge_fun_srt(const StgInfoTable *info)
121 {
122 StgFunInfoTable *fun_info;
123
124 if (!major_gc) return;
125
126 fun_info = itbl_to_fun_itbl(info);
127 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
128 }
129
130 /* -----------------------------------------------------------------------------
131 Scavenge a TSO.
132 -------------------------------------------------------------------------- */
133
134 static void
135 scavengeTSO (StgTSO *tso)
136 {
137 rtsBool saved_eager;
138
139 if (tso->what_next == ThreadRelocated) {
140 // the only way this can happen is if the old TSO was on the
141 // mutable list. We might have other links to this defunct
142 // TSO, so we must update its link field.
143 evacuate((StgClosure**)&tso->_link);
144 return;
145 }
146
147 saved_eager = gct->eager_promotion;
148 gct->eager_promotion = rtsFalse;
149
150 if ( tso->why_blocked == BlockedOnMVar
151 || tso->why_blocked == BlockedOnBlackHole
152 || tso->why_blocked == BlockedOnException
153 ) {
154 evacuate(&tso->block_info.closure);
155 }
156 evacuate((StgClosure **)&tso->blocked_exceptions);
157
158 // We don't always chase the link field: TSOs on the blackhole
159 // queue are not automatically alive, so the link field is a
160 // "weak" pointer in that case.
161 if (tso->why_blocked != BlockedOnBlackHole) {
162 evacuate((StgClosure **)&tso->link);
163 }
164
165 // scavenge current transaction record
166 evacuate((StgClosure **)&tso->trec);
167
168 // scavenge this thread's stack
169 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
170
171 if (gct->failed_to_evac) {
172 tso->flags |= TSO_DIRTY;
173 } else {
174 tso->flags &= ~TSO_DIRTY;
175 }
176
177 gct->eager_promotion = saved_eager;
178 }
179
180 /* -----------------------------------------------------------------------------
181 Blocks of function args occur on the stack (at the top) and
182 in PAPs.
183 -------------------------------------------------------------------------- */
184
185 STATIC_INLINE StgPtr
186 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
187 {
188 StgPtr p;
189 StgWord bitmap;
190 nat size;
191
192 p = (StgPtr)args;
193 switch (fun_info->f.fun_type) {
194 case ARG_GEN:
195 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
196 size = BITMAP_SIZE(fun_info->f.b.bitmap);
197 goto small_bitmap;
198 case ARG_GEN_BIG:
199 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
200 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
201 p += size;
202 break;
203 default:
204 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
205 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
206 small_bitmap:
207 while (size > 0) {
208 if ((bitmap & 1) == 0) {
209 evacuate((StgClosure **)p);
210 }
211 p++;
212 bitmap = bitmap >> 1;
213 size--;
214 }
215 break;
216 }
217 return p;
218 }
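// Worked example (editorial note): for ARG_GEN with bitmap = 0x2 and
// size = 2, arg word 0 (bit clear) is a pointer and is evacuated, while
// arg word 1 (bit set) is a non-pointer and is skipped.  Note that this
// is the opposite convention to SRT bitmaps, where a set bit marks a
// pointer.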
219
220 STATIC_INLINE StgPtr
221 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
222 {
223 StgPtr p;
224 StgWord bitmap;
225 StgFunInfoTable *fun_info;
226
227 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
228 ASSERT(fun_info->i.type != PAP);
229 p = (StgPtr)payload;
230
231 switch (fun_info->f.fun_type) {
232 case ARG_GEN:
233 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
234 goto small_bitmap;
235 case ARG_GEN_BIG:
236 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
237 p += size;
238 break;
239 case ARG_BCO:
240 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
241 p += size;
242 break;
243 default:
244 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
245 small_bitmap:
246 while (size > 0) {
247 if ((bitmap & 1) == 0) {
248 evacuate((StgClosure **)p);
249 }
250 p++;
251 bitmap = bitmap >> 1;
252 size--;
253 }
254 break;
255 }
256 return p;
257 }
258
259 STATIC_INLINE StgPtr
260 scavenge_PAP (StgPAP *pap)
261 {
262 evacuate(&pap->fun);
263 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
264 }
265
266 STATIC_INLINE StgPtr
267 scavenge_AP (StgAP *ap)
268 {
269 evacuate(&ap->fun);
270 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
271 }
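// Editorial note: PAPs and APs both keep their function in `fun', the
// argument count in `n_args' and the arguments in `payload[]', which is
// why the two wrappers above can share scavenge_PAP_payload().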
272
273 /* -----------------------------------------------------------------------------
274 Scavenge everything on the mark stack.
275
276 This is slightly different from scavenge():
277 - we don't walk linearly through the objects, so the scavenger
278 doesn't need to advance the pointer on to the next object.
279 -------------------------------------------------------------------------- */
280
281 static void
282 scavenge_mark_stack(void)
283 {
284 StgPtr p, q;
285 StgInfoTable *info;
286 step *saved_evac_step;
287
288 gct->evac_step = &oldest_gen->steps[0];
289 saved_evac_step = gct->evac_step;
290
291 linear_scan:
292 while (!mark_stack_empty()) {
293 p = pop_mark_stack();
294
295 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
296 info = get_itbl((StgClosure *)p);
297
298 q = p;
299 switch (((volatile StgWord *)info)[1] & 0xffff) {
300
301 case MVAR_CLEAN:
302 case MVAR_DIRTY:
303 {
304 rtsBool saved_eager_promotion = gct->eager_promotion;
305
306 StgMVar *mvar = ((StgMVar *)p);
307 gct->eager_promotion = rtsFalse;
308 evacuate((StgClosure **)&mvar->head);
309 evacuate((StgClosure **)&mvar->tail);
310 evacuate((StgClosure **)&mvar->value);
311 gct->eager_promotion = saved_eager_promotion;
312
313 if (gct->failed_to_evac) {
314 mvar->header.info = &stg_MVAR_DIRTY_info;
315 } else {
316 mvar->header.info = &stg_MVAR_CLEAN_info;
317 }
318 break;
319 }
320
321 case FUN_2_0:
322 scavenge_fun_srt(info);
323 evacuate(&((StgClosure *)p)->payload[1]);
324 evacuate(&((StgClosure *)p)->payload[0]);
325 break;
326
327 case THUNK_2_0:
328 scavenge_thunk_srt(info);
329 evacuate(&((StgThunk *)p)->payload[1]);
330 evacuate(&((StgThunk *)p)->payload[0]);
331 break;
332
333 case CONSTR_2_0:
334 evacuate(&((StgClosure *)p)->payload[1]);
335 evacuate(&((StgClosure *)p)->payload[0]);
336 break;
337
338 case FUN_1_0:
339 case FUN_1_1:
340 scavenge_fun_srt(info);
341 evacuate(&((StgClosure *)p)->payload[0]);
342 break;
343
344 case THUNK_1_0:
345 case THUNK_1_1:
346 scavenge_thunk_srt(info);
347 evacuate(&((StgThunk *)p)->payload[0]);
348 break;
349
350 case CONSTR_1_0:
351 case CONSTR_1_1:
352 evacuate(&((StgClosure *)p)->payload[0]);
353 break;
354
355 case FUN_0_1:
356 case FUN_0_2:
357 scavenge_fun_srt(info);
358 break;
359
360 case THUNK_0_1:
361 case THUNK_0_2:
362 scavenge_thunk_srt(info);
363 break;
364
365 case CONSTR_0_1:
366 case CONSTR_0_2:
367 break;
368
369 case FUN:
370 scavenge_fun_srt(info);
371 goto gen_obj;
372
373 case THUNK:
374 {
375 StgPtr end;
376
377 scavenge_thunk_srt(info);
378 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
379 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
380 evacuate((StgClosure **)p);
381 }
382 break;
383 }
384
385 gen_obj:
386 case CONSTR:
387 case WEAK:
388 case STABLE_NAME:
389 {
390 StgPtr end;
391
392 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
393 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
394 evacuate((StgClosure **)p);
395 }
396 break;
397 }
398
399 case BCO: {
400 StgBCO *bco = (StgBCO *)p;
401 evacuate((StgClosure **)&bco->instrs);
402 evacuate((StgClosure **)&bco->literals);
403 evacuate((StgClosure **)&bco->ptrs);
404 break;
405 }
406
407 case IND_PERM:
408 // don't need to do anything here: the only possible case
409 // is that we're in a 1-space compacting collector, with
410 // no "old" generation.
411 break;
412
413 case IND_OLDGEN:
414 case IND_OLDGEN_PERM:
415 evacuate(&((StgInd *)p)->indirectee);
416 break;
417
418 case MUT_VAR_CLEAN:
419 case MUT_VAR_DIRTY: {
420 rtsBool saved_eager_promotion = gct->eager_promotion;
421
422 gct->eager_promotion = rtsFalse;
423 evacuate(&((StgMutVar *)p)->var);
424 gct->eager_promotion = saved_eager_promotion;
425
426 if (gct->failed_to_evac) {
427 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
428 } else {
429 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
430 }
431 break;
432 }
433
434 case CAF_BLACKHOLE:
435 case SE_CAF_BLACKHOLE:
436 case SE_BLACKHOLE:
437 case BLACKHOLE:
438 case ARR_WORDS:
439 break;
440
441 case THUNK_SELECTOR:
442 {
443 StgSelector *s = (StgSelector *)p;
444 evacuate(&s->selectee);
445 break;
446 }
447
448 // A chunk of stack saved in a heap object
449 case AP_STACK:
450 {
451 StgAP_STACK *ap = (StgAP_STACK *)p;
452
453 evacuate(&ap->fun);
454 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
455 break;
456 }
457
458 case PAP:
459 scavenge_PAP((StgPAP *)p);
460 break;
461
462 case AP:
463 scavenge_AP((StgAP *)p);
464 break;
465
466 case MUT_ARR_PTRS_CLEAN:
467 case MUT_ARR_PTRS_DIRTY:
468 // follow everything
469 {
470 StgPtr next;
471 rtsBool saved_eager;
472
473 // We don't eagerly promote objects pointed to by a mutable
474 // array, but if we find the array only points to objects in
475 // the same or an older generation, we mark it "clean" and
476 // avoid traversing it during minor GCs.
477 saved_eager = gct->eager_promotion;
478 gct->eager_promotion = rtsFalse;
479 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
480 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
481 evacuate((StgClosure **)p);
482 }
483 gct->eager_promotion = saved_eager;
484
485 if (gct->failed_to_evac) {
486 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
487 } else {
488 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
489 }
490
491 gct->failed_to_evac = rtsTrue; // mutable anyhow.
492 break;
493 }
494
495 case MUT_ARR_PTRS_FROZEN:
496 case MUT_ARR_PTRS_FROZEN0:
497 // follow everything
498 {
499 StgPtr next, q = p;
500
501 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
502 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
503 evacuate((StgClosure **)p);
504 }
505
506 // If we're going to put this object on the mutable list, then
507 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
508 if (gct->failed_to_evac) {
509 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
510 } else {
511 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
512 }
513 break;
514 }
515
516 case TSO:
517 {
518 scavengeTSO((StgTSO*)p);
519 gct->failed_to_evac = rtsTrue; // always on the mutable list
520 break;
521 }
522
523 case TVAR_WATCH_QUEUE:
524 {
525 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
526 gct->evac_step = 0;
527 evacuate((StgClosure **)&wq->closure);
528 evacuate((StgClosure **)&wq->next_queue_entry);
529 evacuate((StgClosure **)&wq->prev_queue_entry);
530 gct->evac_step = saved_evac_step;
531 gct->failed_to_evac = rtsTrue; // mutable
532 break;
533 }
534
535 case TVAR:
536 {
537 StgTVar *tvar = ((StgTVar *) p);
538 gct->evac_step = 0;
539 evacuate((StgClosure **)&tvar->current_value);
540 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
541 gct->evac_step = saved_evac_step;
542 gct->failed_to_evac = rtsTrue; // mutable
543 break;
544 }
545
546 case TREC_CHUNK:
547 {
548 StgWord i;
549 StgTRecChunk *tc = ((StgTRecChunk *) p);
550 TRecEntry *e = &(tc -> entries[0]);
551 gct->evac_step = 0;
552 evacuate((StgClosure **)&tc->prev_chunk);
553 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
554 evacuate((StgClosure **)&e->tvar);
555 evacuate((StgClosure **)&e->expected_value);
556 evacuate((StgClosure **)&e->new_value);
557 }
558 gct->evac_step = saved_evac_step;
559 gct->failed_to_evac = rtsTrue; // mutable
560 break;
561 }
562
563 case TREC_HEADER:
564 {
565 StgTRecHeader *trec = ((StgTRecHeader *) p);
566 gct->evac_step = 0;
567 evacuate((StgClosure **)&trec->enclosing_trec);
568 evacuate((StgClosure **)&trec->current_chunk);
569 evacuate((StgClosure **)&trec->invariants_to_check);
570 gct->evac_step = saved_evac_step;
571 gct->failed_to_evac = rtsTrue; // mutable
572 break;
573 }
574
575 case ATOMIC_INVARIANT:
576 {
577 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
578 gct->evac_step = 0;
579 evacuate(&invariant->code);
580 evacuate((StgClosure **)&invariant->last_execution);
581 gct->evac_step = saved_evac_step;
582 gct->failed_to_evac = rtsTrue; // mutable
583 break;
584 }
585
586 case INVARIANT_CHECK_QUEUE:
587 {
588 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
589 gct->evac_step = 0;
590 evacuate((StgClosure **)&queue->invariant);
591 evacuate((StgClosure **)&queue->my_execution);
592 evacuate((StgClosure **)&queue->next_queue_entry);
593 gct->evac_step = saved_evac_step;
594 gct->failed_to_evac = rtsTrue; // mutable
595 break;
596 }
597
598 default:
599 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
600 info->type, p);
601 }
602
603 if (gct->failed_to_evac) {
604 gct->failed_to_evac = rtsFalse;
605 if (gct->evac_step) {
606 recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
607 }
608 }
609
610 // mark the next bit to indicate "scavenged"
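            // (Editorial note: the compacting collector keeps roughly one
            // mark bit per word, so the bit for q means "live" and the
            // spare bit for q+1 is reused here to mean "scavenged"; the
            // is_marked(oldgen_scan+1,...) test in the linear scan below
            // relies on this.)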
611 mark(q+1, Bdescr(q));
612
613 } // while (!mark_stack_empty())
614
615 // start a new linear scan if the mark stack overflowed at some point
616 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
617 debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
618 mark_stack_overflowed = rtsFalse;
619 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
620 oldgen_scan = oldgen_scan_bd->start;
621 }
622
623 if (oldgen_scan_bd) {
624 // push a new thing on the mark stack
625 loop:
626 // find a closure that is marked but not scavenged, and start
627 // from there.
628 while (oldgen_scan < oldgen_scan_bd->free
629 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
630 oldgen_scan++;
631 }
632
633 if (oldgen_scan < oldgen_scan_bd->free) {
634
635 // already scavenged?
636 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
637 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
638 goto loop;
639 }
640 push_mark_stack(oldgen_scan);
641 // ToDo: bump the linear scan by the actual size of the object
642 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
643 goto linear_scan;
644 }
645
646 oldgen_scan_bd = oldgen_scan_bd->link;
647 if (oldgen_scan_bd != NULL) {
648 oldgen_scan = oldgen_scan_bd->start;
649 goto loop;
650 }
651 }
652 }
653
654 /* -----------------------------------------------------------------------------
655 Scavenge one object.
656
657 This is used for objects that are temporarily marked as mutable
658 because they contain old-to-new generation pointers. Only certain
659 objects can have this property.
660 -------------------------------------------------------------------------- */
661
662 static rtsBool
663 scavenge_one(StgPtr p)
664 {
665 const StgInfoTable *info;
666 step *saved_evac_step = gct->evac_step;
667 rtsBool no_luck;
668
669 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
670 info = get_itbl((StgClosure *)p);
671
672 switch (info->type) {
673
674 case MVAR_CLEAN:
675 case MVAR_DIRTY:
676 {
677 rtsBool saved_eager_promotion = gct->eager_promotion;
678
679 StgMVar *mvar = ((StgMVar *)p);
680 gct->eager_promotion = rtsFalse;
681 evacuate((StgClosure **)&mvar->head);
682 evacuate((StgClosure **)&mvar->tail);
683 evacuate((StgClosure **)&mvar->value);
684 gct->eager_promotion = saved_eager_promotion;
685
686 if (gct->failed_to_evac) {
687 mvar->header.info = &stg_MVAR_DIRTY_info;
688 } else {
689 mvar->header.info = &stg_MVAR_CLEAN_info;
690 }
691 break;
692 }
693
694 case THUNK:
695 case THUNK_1_0:
696 case THUNK_0_1:
697 case THUNK_1_1:
698 case THUNK_0_2:
699 case THUNK_2_0:
700 {
701 StgPtr q, end;
702
703 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
704 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
705 evacuate((StgClosure **)q);
706 }
707 break;
708 }
709
710 case FUN:
711 case FUN_1_0: // hardly worth specialising these guys
712 case FUN_0_1:
713 case FUN_1_1:
714 case FUN_0_2:
715 case FUN_2_0:
716 case CONSTR:
717 case CONSTR_1_0:
718 case CONSTR_0_1:
719 case CONSTR_1_1:
720 case CONSTR_0_2:
721 case CONSTR_2_0:
722 case WEAK:
723 case IND_PERM:
724 {
725 StgPtr q, end;
726
727 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
728 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
729 evacuate((StgClosure **)q);
730 }
731 break;
732 }
733
734 case MUT_VAR_CLEAN:
735 case MUT_VAR_DIRTY: {
736 StgPtr q = p;
737 rtsBool saved_eager_promotion = gct->eager_promotion;
738
739 gct->eager_promotion = rtsFalse;
740 evacuate(&((StgMutVar *)p)->var);
741 gct->eager_promotion = saved_eager_promotion;
742
743 if (gct->failed_to_evac) {
744 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
745 } else {
746 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
747 }
748 break;
749 }
750
751 case CAF_BLACKHOLE:
752 case SE_CAF_BLACKHOLE:
753 case SE_BLACKHOLE:
754 case BLACKHOLE:
755 break;
756
757 case THUNK_SELECTOR:
758 {
759 StgSelector *s = (StgSelector *)p;
760 evacuate(&s->selectee);
761 break;
762 }
763
764 case AP_STACK:
765 {
766 StgAP_STACK *ap = (StgAP_STACK *)p;
767
768 evacuate(&ap->fun);
769 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
770 p = (StgPtr)ap->payload + ap->size;
771 break;
772 }
773
774 case PAP:
775 p = scavenge_PAP((StgPAP *)p);
776 break;
777
778 case AP:
779 p = scavenge_AP((StgAP *)p);
780 break;
781
782 case ARR_WORDS:
783 // nothing to follow
784 break;
785
786 case MUT_ARR_PTRS_CLEAN:
787 case MUT_ARR_PTRS_DIRTY:
788 {
789 StgPtr next, q;
790 rtsBool saved_eager;
791
792 // We don't eagerly promote objects pointed to by a mutable
793 // array, but if we find the array only points to objects in
794 // the same or an older generation, we mark it "clean" and
795 // avoid traversing it during minor GCs.
796 saved_eager = gct->eager_promotion;
797 gct->eager_promotion = rtsFalse;
798 q = p;
799 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
800 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
801 evacuate((StgClosure **)p);
802 }
803 gct->eager_promotion = saved_eager;
804
805 if (gct->failed_to_evac) {
806 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
807 } else {
808 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
809 }
810
811 gct->failed_to_evac = rtsTrue;
812 break;
813 }
814
815 case MUT_ARR_PTRS_FROZEN:
816 case MUT_ARR_PTRS_FROZEN0:
817 {
818 // follow everything
819 StgPtr next, q=p;
820
821 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
822 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
823 evacuate((StgClosure **)p);
824 }
825
826 // If we're going to put this object on the mutable list, then
827 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
828 if (gct->failed_to_evac) {
829 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
830 } else {
831 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
832 }
833 break;
834 }
835
836 case TSO:
837 {
838 scavengeTSO((StgTSO*)p);
839 gct->failed_to_evac = rtsTrue; // always on the mutable list
840 break;
841 }
842
843 case TVAR_WATCH_QUEUE:
844 {
845 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
846 gct->evac_step = 0;
847 evacuate((StgClosure **)&wq->closure);
848 evacuate((StgClosure **)&wq->next_queue_entry);
849 evacuate((StgClosure **)&wq->prev_queue_entry);
850 gct->evac_step = saved_evac_step;
851 gct->failed_to_evac = rtsTrue; // mutable
852 break;
853 }
854
855 case TVAR:
856 {
857 StgTVar *tvar = ((StgTVar *) p);
858 gct->evac_step = 0;
859 evacuate((StgClosure **)&tvar->current_value);
860 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
861 gct->evac_step = saved_evac_step;
862 gct->failed_to_evac = rtsTrue; // mutable
863 break;
864 }
865
866 case TREC_HEADER:
867 {
868 StgTRecHeader *trec = ((StgTRecHeader *) p);
869 gct->evac_step = 0;
870 evacuate((StgClosure **)&trec->enclosing_trec);
871 evacuate((StgClosure **)&trec->current_chunk);
872 evacuate((StgClosure **)&trec->invariants_to_check);
873 gct->evac_step = saved_evac_step;
874 gct->failed_to_evac = rtsTrue; // mutable
875 break;
876 }
877
878 case TREC_CHUNK:
879 {
880 StgWord i;
881 StgTRecChunk *tc = ((StgTRecChunk *) p);
882 TRecEntry *e = &(tc -> entries[0]);
883 gct->evac_step = 0;
884 evacuate((StgClosure **)&tc->prev_chunk);
885 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
886 evacuate((StgClosure **)&e->tvar);
887 evacuate((StgClosure **)&e->expected_value);
888 evacuate((StgClosure **)&e->new_value);
889 }
890 gct->evac_step = saved_evac_step;
891 gct->failed_to_evac = rtsTrue; // mutable
892 break;
893 }
894
895 case ATOMIC_INVARIANT:
896 {
897 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
898 gct->evac_step = 0;
899 evacuate(&invariant->code);
900 evacuate((StgClosure **)&invariant->last_execution);
901 gct->evac_step = saved_evac_step;
902 gct->failed_to_evac = rtsTrue; // mutable
903 break;
904 }
905
906 case INVARIANT_CHECK_QUEUE:
907 {
908 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
909 gct->evac_step = 0;
910 evacuate((StgClosure **)&queue->invariant);
911 evacuate((StgClosure **)&queue->my_execution);
912 evacuate((StgClosure **)&queue->next_queue_entry);
913 gct->evac_step = saved_evac_step;
914 gct->failed_to_evac = rtsTrue; // mutable
915 break;
916 }
917
918 case IND_OLDGEN:
919 case IND_OLDGEN_PERM:
920 case IND_STATIC:
921 {
922 /* Careful here: a THUNK can be on the mutable list because
923 * it contains pointers to young gen objects. If such a thunk
924 * is updated, the IND_OLDGEN will be added to the mutable
925 * list again, and we'll scavenge it twice. evacuate()
926 * doesn't check whether the object has already been
927 * evacuated, so we perform that check here.
928 */
929 StgClosure *q = ((StgInd *)p)->indirectee;
930 if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
931 break;
932 }
933 evacuate(&((StgInd *)p)->indirectee);
934 }
935
936 #if 0 && defined(DEBUG)
937 if (RtsFlags.DebugFlags.gc)
938 /* Debugging code to print out the size of the thing we just
939 * promoted
940 */
941 {
942 StgPtr start = gen->steps[0].scan;
943 bdescr *start_bd = gen->steps[0].scan_bd;
944 nat size = 0;
945 scavenge(&gen->steps[0]);
946 if (start_bd != gen->steps[0].scan_bd) {
947 size += (P_)BLOCK_ROUND_UP(start) - start;
948 start_bd = start_bd->link;
949 while (start_bd != gen->steps[0].scan_bd) {
950 size += BLOCK_SIZE_W;
951 start_bd = start_bd->link;
952 }
953 size += gen->steps[0].scan -
954 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
955 } else {
956 size = gen->steps[0].scan - start;
957 }
958 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
959 }
960 #endif
961 break;
962
963 default:
964 barf("scavenge_one: strange object %d", (int)(info->type));
965 }
966
967 no_luck = gct->failed_to_evac;
968 gct->failed_to_evac = rtsFalse;
969 return (no_luck);
970 }
971
972 /* -----------------------------------------------------------------------------
973 Scavenging mutable lists.
974
975 We treat the mutable list of each generation > N (i.e. all the
976 generations older than the one being collected) as roots. We also
977 remove non-mutable objects from the mutable list at this point.
978 -------------------------------------------------------------------------- */
979
980 void
981 scavenge_mutable_list(generation *gen)
982 {
983 bdescr *bd;
984 StgPtr p, q;
985
986 bd = gen->saved_mut_list;
987
988 gct->evac_step = &gen->steps[0];
989 for (; bd != NULL; bd = bd->link) {
990 for (q = bd->start; q < bd->free; q++) {
991 p = (StgPtr)*q;
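            // (each word on a mut_list block is itself a pointer to a
            // closure in this generation, hence the dereference above)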
992 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
993
994 #ifdef DEBUG
995 switch (get_itbl((StgClosure *)p)->type) {
996 case MUT_VAR_CLEAN:
997 barf("MUT_VAR_CLEAN on mutable list");
998 case MUT_VAR_DIRTY:
999 mutlist_MUTVARS++; break;
1000 case MUT_ARR_PTRS_CLEAN:
1001 case MUT_ARR_PTRS_DIRTY:
1002 case MUT_ARR_PTRS_FROZEN:
1003 case MUT_ARR_PTRS_FROZEN0:
1004 mutlist_MUTARRS++; break;
1005 case MVAR_CLEAN:
1006 barf("MVAR_CLEAN on mutable list");
1007 case MVAR_DIRTY:
1008 mutlist_MVARS++; break;
1009 default:
1010 mutlist_OTHERS++; break;
1011 }
1012 #endif
1013
1014 // Check whether this object is "clean", that is, it
1015 // definitely doesn't point into a young generation.
1016 // Clean objects don't need to be scavenged. Some clean
1017 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1018 // list at all; others, such as MUT_ARR_PTRS_CLEAN and
1019 // TSO, are always on the mutable list.
1020 //
1021 switch (get_itbl((StgClosure *)p)->type) {
1022 case MUT_ARR_PTRS_CLEAN:
1023 recordMutableGen_GC((StgClosure *)p,gen);
1024 continue;
1025 case TSO: {
1026 StgTSO *tso = (StgTSO *)p;
1027 if ((tso->flags & TSO_DIRTY) == 0) {
1028 // A clean TSO: we don't have to traverse its
1029 // stack. However, we *do* follow the link field:
1030 // we don't want to have to mark a TSO dirty just
1031 // because we put it on a different queue.
1032 if (tso->why_blocked != BlockedOnBlackHole) {
1033 evacuate((StgClosure **)&tso->link);
1034 }
1035 recordMutableGen_GC((StgClosure *)p,gen);
1036 continue;
1037 }
1038 }
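	    // a dirty TSO falls through to the default case here and is
	    // scavenged in full by scavenge_one() below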
1039 default:
1040 ;
1041 }
1042
1043 if (scavenge_one(p)) {
1044 // didn't manage to promote everything, so put the
1045 // object back on the list.
1046 recordMutableGen_GC((StgClosure *)p,gen);
1047 }
1048 }
1049 }
1050
1051 // free the old mut_list
1052 freeChain_sync(gen->saved_mut_list);
1053 gen->saved_mut_list = NULL;
1054 }
1055
1056 /* -----------------------------------------------------------------------------
1057 Scavenging the static objects.
1058
1059 Static objects reachable from the collected generations are kept on
1060 the static_objects list; we scavenge each one and then move it onto
1061 the scavenged_static_objects list.
1062 -------------------------------------------------------------------------- */
1063
1064 static void
1065 scavenge_static(void)
1066 {
1067 StgClosure* p;
1068 const StgInfoTable *info;
1069
1070 /* Always evacuate straight to the oldest generation for static
1071 * objects */
1072 gct->evac_step = &oldest_gen->steps[0];
1073
1074 /* keep going until we've scavenged all the objects on the linked
1075 list... */
1076
1077 while (1) {
1078
1079 ACQUIRE_SPIN_LOCK(&static_objects_sync);
1080
1081 /* get the next static object from the list. Remember, there might
1082 * be more stuff on this list after each evacuation...
1083 * (static_objects is a global)
1084 */
1085 p = static_objects;
1086 if (p == END_OF_STATIC_LIST) {
1087 RELEASE_SPIN_LOCK(&static_objects_sync);
1088 break;
1089 }
1090
1091 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1092 info = get_itbl(p);
1093 /*
1094 if (info->type==RBH)
1095 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1096 */
1097 // make sure the info pointer is into text space
1098
1099 /* Take this object *off* the static_objects list,
1100 * and put it on the scavenged_static_objects list.
1101 */
1102 static_objects = *STATIC_LINK(info,p);
1103 *STATIC_LINK(info,p) = scavenged_static_objects;
1104 scavenged_static_objects = p;
1105
1106 RELEASE_SPIN_LOCK(&static_objects_sync);
1107
1108 switch (info -> type) {
1109
1110 case IND_STATIC:
1111 {
1112 StgInd *ind = (StgInd *)p;
1113 evacuate(&ind->indirectee);
1114
1115 /* might fail to evacuate it, in which case we have to put it
1116 * back on the mutable list of the oldest generation. We
1117 * leave it *on* the scavenged_static_objects list, though,
1118 * in case we visit this object again.
1119 */
1120 if (gct->failed_to_evac) {
1121 gct->failed_to_evac = rtsFalse;
1122 recordMutableGen_GC((StgClosure *)p,oldest_gen);
1123 }
1124 break;
1125 }
1126
1127 case THUNK_STATIC:
1128 scavenge_thunk_srt(info);
1129 break;
1130
1131 case FUN_STATIC:
1132 scavenge_fun_srt(info);
1133 break;
1134
1135 case CONSTR_STATIC:
1136 {
1137 StgPtr q, next;
1138
1139 next = (P_)p->payload + info->layout.payload.ptrs;
1140 // evacuate the pointers
1141 for (q = (P_)p->payload; q < next; q++) {
1142 evacuate((StgClosure **)q);
1143 }
1144 break;
1145 }
1146
1147 default:
1148 barf("scavenge_static: strange closure %d", (int)(info->type));
1149 }
1150
1151 ASSERT(gct->failed_to_evac == rtsFalse);
1152 }
1153 }
1154
1155 /* -----------------------------------------------------------------------------
1156 scavenge a chunk of memory described by a bitmap
1157 -------------------------------------------------------------------------- */
1158
1159 static void
1160 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
1161 {
1162 nat i, b;
1163 StgWord bitmap;
1164
1165 b = 0;
1166 bitmap = large_bitmap->bitmap[b];
1167 for (i = 0; i < size; ) {
1168 if ((bitmap & 1) == 0) {
1169 evacuate((StgClosure **)p);
1170 }
1171 i++;
1172 p++;
1173 if (i % BITS_IN(W_) == 0) {
1174 b++;
1175 bitmap = large_bitmap->bitmap[b];
1176 } else {
1177 bitmap = bitmap >> 1;
1178 }
1179 }
1180 }
1181
1182 STATIC_INLINE StgPtr
1183 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
1184 {
1185 while (size > 0) {
1186 if ((bitmap & 1) == 0) {
1187 evacuate((StgClosure **)p);
1188 }
1189 p++;
1190 bitmap = bitmap >> 1;
1191 size--;
1192 }
1193 return p;
1194 }
1195
1196 /* -----------------------------------------------------------------------------
1197 scavenge_stack walks over a section of stack and evacuates all the
1198 objects pointed to by it. We can use the same code for walking
1199 AP_STACK closures, since these are just sections of copied stack.
1200 -------------------------------------------------------------------------- */
1201
1202 static void
1203 scavenge_stack(StgPtr p, StgPtr stack_end)
1204 {
1205 const StgRetInfoTable* info;
1206 StgWord bitmap;
1207 nat size;
1208
1209 /*
1210 * Each time around this loop, we are looking at a chunk of stack
1211 * that starts with an activation record.
1212 */
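/* Editorial sketch of the layout being walked (inferred from the cases
 * below): each frame begins with an info pointer; for bitmapped frames
 * (RET_SMALL, CATCH_FRAME, ...) the info table's bitmap says which of the
 * following payload words are pointers (clear bit) and which are raw
 * words (set bit), and p is advanced past each complete frame until it
 * reaches stack_end.
 */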
1213
1214 while (p < stack_end) {
1215 info = get_ret_itbl((StgClosure *)p);
1216
1217 switch (info->i.type) {
1218
1219 case UPDATE_FRAME:
1220 // In SMP, we can get update frames that point to indirections
1221 // when two threads evaluate the same thunk. We do attempt to
1222 // discover this situation in threadPaused(), but it's
1223 // possible that the following sequence occurs:
1224 //
1225 // A B
1226 // enter T
1227 // enter T
1228 // blackhole T
1229 // update T
1230 // GC
1231 //
1232 // Now T is an indirection, and the update frame is already
1233 // marked on A's stack, so we won't traverse it again in
1234 // threadPaused(). We could traverse the whole stack again
1235 // before GC, but that seems like overkill.
1236 //
1237 // Scavenging this update frame as normal would be disastrous;
1238 // the updatee would end up pointing to the value. So we turn
1239 // the indirection into an IND_PERM, so that evacuate will
1240 // copy the indirection into the old generation instead of
1241 // discarding it.
1242 {
1243 nat type;
1244 type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
1245 if (type == IND) {
1246 ((StgUpdateFrame *)p)->updatee->header.info =
1247 (StgInfoTable *)&stg_IND_PERM_info;
1248 } else if (type == IND_OLDGEN) {
1249 ((StgUpdateFrame *)p)->updatee->header.info =
1250 (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
1251 }
1252 evacuate(&((StgUpdateFrame *)p)->updatee);
1253 p += sizeofW(StgUpdateFrame);
1254 continue;
1255 }
1256
1257 // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
1258 case CATCH_STM_FRAME:
1259 case CATCH_RETRY_FRAME:
1260 case ATOMICALLY_FRAME:
1261 case STOP_FRAME:
1262 case CATCH_FRAME:
1263 case RET_SMALL:
1264 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1265 size = BITMAP_SIZE(info->i.layout.bitmap);
1266 // NOTE: the payload starts immediately after the info-ptr; we
1267 // don't have an StgHeader in the same sense as a heap closure.
1268 p++;
1269 p = scavenge_small_bitmap(p, size, bitmap);
1270
1271 follow_srt:
1272 if (major_gc)
1273 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
1274 continue;
1275
1276 case RET_BCO: {
1277 StgBCO *bco;
1278 nat size;
1279
1280 p++;
1281 evacuate((StgClosure **)p);
1282 bco = (StgBCO *)*p;
1283 p++;
1284 size = BCO_BITMAP_SIZE(bco);
1285 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1286 p += size;
1287 continue;
1288 }
1289
1290 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1291 case RET_BIG:
1292 {
1293 nat size;
1294
1295 size = GET_LARGE_BITMAP(&info->i)->size;
1296 p++;
1297 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1298 p += size;
1299 // and don't forget to follow the SRT
1300 goto follow_srt;
1301 }
1302
1303 // Dynamic bitmap: the mask is stored on the stack, and
1304 // there are a number of non-pointers followed by a number
1305 // of pointers above the bitmapped area. (see StgMacros.h,
1306 // HEAP_CHK_GEN).
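	// (From the code below: the liveness word holds a small bitmap
	// covering the first RET_DYN_BITMAP_SIZE payload words; these are
	// followed by RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE
	// non-pointer words and finally RET_DYN_PTRS(dyn) pointers, all of
	// which are evacuated.)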
1307 case RET_DYN:
1308 {
1309 StgWord dyn;
1310 dyn = ((StgRetDyn *)p)->liveness;
1311
1312 // traverse the bitmap first
1313 bitmap = RET_DYN_LIVENESS(dyn);
1314 p = (P_)&((StgRetDyn *)p)->payload[0];
1315 size = RET_DYN_BITMAP_SIZE;
1316 p = scavenge_small_bitmap(p, size, bitmap);
1317
1318 // skip over the non-ptr words
1319 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
1320
1321 // follow the ptr words
1322 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
1323 evacuate((StgClosure **)p);
1324 p++;
1325 }
1326 continue;
1327 }
1328
1329 case RET_FUN:
1330 {
1331 StgRetFun *ret_fun = (StgRetFun *)p;
1332 StgFunInfoTable *fun_info;
1333
1334 evacuate(&ret_fun->fun);
1335 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1336 p = scavenge_arg_block(fun_info, ret_fun->payload);
1337 goto follow_srt;
1338 }
1339
1340 default:
1341 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1342 }
1343 }
1344 }
1345
1346 /*-----------------------------------------------------------------------------
1347 scavenge the large object list.
1348
1349 evac_step set by caller; similar games played with evac_step as with
1350 scavenge() - see comment at the top of scavenge(). Most large
1351 objects are (repeatedly) mutable, so most of the time evac_step will
1352 be zero.
1353 --------------------------------------------------------------------------- */
1354
1355 static void
1356 scavenge_large (step_workspace *ws)
1357 {
1358 bdescr *bd;
1359 StgPtr p;
1360
1361 gct->evac_step = ws->stp;
1362
1363 bd = ws->todo_large_objects;
1364
1365 for (; bd != NULL; bd = ws->todo_large_objects) {
1366
1367 // take this object *off* the large objects list and put it on
1368 // the scavenged large objects list. This is so that we can
1369 // treat todo_large_objects as a stack and push new objects on
1370 // the front when evacuating.
1371 ws->todo_large_objects = bd->link;
1372
1373 ACQUIRE_SPIN_LOCK(&ws->stp->sync_large_objects);
1374 dbl_link_onto(bd, &ws->stp->scavenged_large_objects);
1375 ws->stp->n_scavenged_large_blocks += bd->blocks;
1376 RELEASE_SPIN_LOCK(&ws->stp->sync_large_objects);
1377
1378 p = bd->start;
1379 if (scavenge_one(p)) {
1380 if (ws->stp->gen_no > 0) {
1381 recordMutableGen_GC((StgClosure *)p, ws->stp->gen);
1382 }
1383 }
1384 }
1385 }
1386
1387 /* ----------------------------------------------------------------------------
1388 Scavenge a block
1389 ------------------------------------------------------------------------- */
1390
1391 #define MINOR_GC
1392 #include "Scav.c-inc"
1393 #undef MINOR_GC
1394 #include "Scav.c-inc"
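
/* Editorial note: Scav.c-inc is included twice, once with MINOR_GC
 * defined and once without, presumably generating the two block
 * scavengers used below: scavenge_block0 (called when N == 0, i.e. a
 * minor GC) and scavenge_block (the general case).  The exact naming
 * scheme lives in Scav.c-inc and is assumed here.
 */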
1395
1396 /* ----------------------------------------------------------------------------
1397 Find the oldest full block to scavenge, and scavenge it.
1398 ------------------------------------------------------------------------- */
1399
1400 static rtsBool
1401 scavenge_find_global_work (void)
1402 {
1403 bdescr *bd;
1404 int s;
1405 rtsBool flag;
1406 step_workspace *ws;
1407
1408 flag = rtsFalse;
1409 for (s = total_steps-1; s>=0; s--)
1410 {
1411 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1412 continue;
1413 }
1414 ws = &gct->steps[s];
1415
1416 // If we have any large objects to scavenge, do them now.
1417 if (ws->todo_large_objects) {
1418 scavenge_large(ws);
1419 flag = rtsTrue;
1420 }
1421
1422 if ((bd = grab_todo_block(ws)) != NULL) {
1423 // no need to assign this to ws->scan_bd; we're going
1424 // to scavenge the whole thing and then push it on
1425 // our scavd list. This saves pushing out the
1426 // scan_bd block, which might be partial.
1427 if (N == 0) {
1428 scavenge_block0(bd, bd->start);
1429 } else {
1430 scavenge_block(bd, bd->start);
1431 }
1432 push_scan_block(bd, ws);
1433 return rtsTrue;
1434 }
1435
1436 if (flag) return rtsTrue;
1437 }
1438 return rtsFalse;
1439 }
1440
1441 /* ----------------------------------------------------------------------------
1442 Look for local work to do.
1443
1444 We can have outstanding scavenging to do if, for any of the workspaces,
1445
1446 - the scan block is the same as the todo block, and new objects
1447 have been evacuated to the todo block.
1448
1449 - the scan block *was* the same as the todo block, but the todo
1450 block filled up and a new one has been allocated.
1451 ------------------------------------------------------------------------- */
1452
1453 static rtsBool
1454 scavenge_find_local_work (void)
1455 {
1456 int s;
1457 step_workspace *ws;
1458 rtsBool flag;
1459
1460 flag = rtsFalse;
1461 for (s = total_steps-1; s >= 0; s--) {
1462 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1463 continue;
1464 }
1465 ws = &gct->steps[s];
1466
1467 if (ws->todo_bd != NULL)
1468 {
1469 ws->todo_bd->free = ws->todo_free;
1470 }
1471
1472 // If we have a todo block and no scan block, start
1473 // scanning the todo block.
1474 if (ws->scan_bd == NULL && ws->todo_bd != NULL)
1475 {
1476 ws->scan_bd = ws->todo_bd;
1477 ws->scan = ws->scan_bd->start;
1478 }
1479
1480 // If we have a scan block with some work to do,
1481 // scavenge everything up to the free pointer.
1482 if (ws->scan != NULL && ws->scan < ws->scan_bd->free)
1483 {
1484 if (N == 0) {
1485 scavenge_block0(ws->scan_bd, ws->scan);
1486 } else {
1487 scavenge_block(ws->scan_bd, ws->scan);
1488 }
1489 ws->scan = ws->scan_bd->free;
1490 flag = rtsTrue;
1491 }
1492
1493 if (ws->scan_bd != NULL && ws->scan == ws->scan_bd->free
1494 && ws->scan_bd != ws->todo_bd)
1495 {
1496 // we're not going to evac any more objects into
1497 // this block, so push it now.
1498 push_scan_block(ws->scan_bd, ws);
1499 ws->scan_bd = NULL;
1500 ws->scan = NULL;
1501 // we might be able to scan the todo block now. But
1502 // don't do it right away: there might be full blocks
1503 // waiting to be scanned as a result of scavenge_block above.
1504 flag = rtsTrue;
1505 }
1506
1507 if (flag) return rtsTrue;
1508 }
1509 return rtsFalse;
1510 }
1511
1512 /* ----------------------------------------------------------------------------
1513 Scavenge until we can't find anything more to scavenge.
1514 ------------------------------------------------------------------------- */
1515
1516 void
1517 scavenge_loop(void)
1518 {
1519 rtsBool work_to_do;
1520
1521 loop:
1522 work_to_do = rtsFalse;
1523
1524 // scavenge static objects
1525 if (major_gc && static_objects != END_OF_STATIC_LIST) {
1526 IF_DEBUG(sanity, checkStaticObjects(static_objects));
1527 scavenge_static();
1528 }
1529
1530 // scavenge objects in compacted generation
1531 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
1532 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
1533 scavenge_mark_stack();
1534 work_to_do = rtsTrue;
1535 }
1536
1537 // Order is important here: we want to deal in full blocks as
1538 // much as possible, so go for global work in preference to
1539 // local work. Only if all the global work has been exhausted
1540 // do we start scavenging the fragments of blocks in the local
1541 // workspaces.
1542 if (scavenge_find_global_work()) goto loop;
1543 if (scavenge_find_local_work()) goto loop;
1544
1545 if (work_to_do) goto loop;
1546 }
1547
1548 rtsBool
1549 any_work (void)
1550 {
1551 int s;
1552 step_workspace *ws;
1553
1554 write_barrier();
1555
1556 // scavenge static objects
1557 if (major_gc && static_objects != END_OF_STATIC_LIST) {
1558 return rtsTrue;
1559 }
1560
1561 // scavenge objects in compacted generation
1562 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
1563 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
1564 return rtsTrue;
1565 }
1566
1567 // Check for global work in any step. We don't need to check for
1568 // local work, because we have already exited scavenge_loop(),
1569 // which means there is no local work for this thread.
1570 for (s = total_steps-1; s >= 0; s--) {
1571 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1572 continue;
1573 }
1574 ws = &gct->steps[s];
1575 if (ws->todo_large_objects) return rtsTrue;
1576 if (ws->stp->todos) return rtsTrue;
1577 }
1578
1579 return rtsFalse;
1580 }