1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "Storage.h"
18 #include "GC.h"
19 #include "GCThread.h"
20 #include "GCUtils.h"
21 #include "Compact.h"
22 #include "MarkStack.h"
23 #include "Evac.h"
24 #include "Scav.h"
25 #include "Apply.h"
26 #include "Trace.h"
27 #include "Sanity.h"
28 #include "Capability.h"
29 #include "LdvProfile.h"
30
31 #include "sm/MarkWeak.h"
32
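/* 'gct' (see GCThread.h) is the current GC thread's gc_thread state.  The
 * fields used heavily in this file are: evac_gen_no, the generation we are
 * currently trying to evacuate into; eager_promotion, which tells
 * evacuate() whether to promote objects up to evac_gen_no; and
 * failed_to_evac, which evacuate() sets when an object could not be moved
 * into the desired generation, telling the scavenging code below to mark
 * the scavenged object dirty and/or keep it on a mutable list (see Evac.c).
 */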
33 static void scavenge_stack (StgPtr p, StgPtr stack_end);
34
35 static void scavenge_large_bitmap (StgPtr p,
36 StgLargeBitmap *large_bitmap,
37 StgWord size );
38
39 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
40 # define evacuate(a) evacuate1(a)
41 # define scavenge_loop(a) scavenge_loop1(a)
42 # define scavenge_block(a) scavenge_block1(a)
43 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
44 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
45 #endif
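/* In a THREADED_RTS build this file is compiled twice: once with
 * PARALLEL_GC defined (via Scav_thr.c), giving the parallel versions, and
 * once without, in which case the defines above rename the definitions
 * below to their "...1" forms -- sequential variants used when only a
 * single GC thread is running, avoiding the cost of the atomic operations
 * needed for parallel evacuation.
 */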
46
47 /* -----------------------------------------------------------------------------
48 Scavenge a TSO.
49 -------------------------------------------------------------------------- */
50
51 static void
52 scavengeTSO (StgTSO *tso)
53 {
54 rtsBool saved_eager;
55
56 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
57
58 // update the pointer from the InCall.
59 if (tso->bound != NULL) {
60 // NB. We can't just set tso->bound->tso = tso, because this
61 // might be an invalid copy of the TSO resulting from multiple
62 // threads evacuating the TSO simultaneously (see
63 // Evac.c:copy_tag()). Calling evacuate() on this pointer
64 // will ensure that we update it to point to the correct copy.
65 evacuate((StgClosure **)&tso->bound->tso);
66 }
67
68 saved_eager = gct->eager_promotion;
69 gct->eager_promotion = rtsFalse;
70
71 evacuate((StgClosure **)&tso->blocked_exceptions);
72 evacuate((StgClosure **)&tso->bq);
73
74 // scavenge the current transaction record
75 evacuate((StgClosure **)&tso->trec);
76
77 evacuate((StgClosure **)&tso->stackobj);
78
79 evacuate((StgClosure **)&tso->_link);
80 if ( tso->why_blocked == BlockedOnMVar
81 || tso->why_blocked == BlockedOnMVarRead
82 || tso->why_blocked == BlockedOnBlackHole
83 || tso->why_blocked == BlockedOnMsgThrowTo
84 || tso->why_blocked == NotBlocked
85 ) {
86 evacuate(&tso->block_info.closure);
87 }
88 #ifdef THREADED_RTS
89 // in the THREADED_RTS, block_info.closure must always point to a
90 // valid closure, because we assume this in throwTo(). In the
91 // non-threaded RTS it might be a FD (for
92 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
93 else {
94 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
95 }
96 #endif
97
98 tso->dirty = gct->failed_to_evac;
99
100 gct->eager_promotion = saved_eager;
101 }
102
103 /* -----------------------------------------------------------------------------
104 Mutable arrays of pointers
105 -------------------------------------------------------------------------- */
106
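/* Mutable arrays carry a card table: one card per 2^MUT_ARR_PTRS_CARD_BITS
 * elements, recording whether that chunk of the array may still point into
 * a younger generation.  scavenge_mut_arr_ptrs() scans the whole array and
 * sets or clears the cards as it goes; scavenge_mut_arr_ptrs_marked() below
 * re-scans only the cards that are marked.
 */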
107 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
108 {
109 W_ m;
110 rtsBool any_failed;
111 StgPtr p, q;
112
113 any_failed = rtsFalse;
114 p = (StgPtr)&a->payload[0];
115 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
116 {
117 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
118 for (; p < q; p++) {
119 evacuate((StgClosure**)p);
120 }
121 if (gct->failed_to_evac) {
122 any_failed = rtsTrue;
123 *mutArrPtrsCard(a,m) = 1;
124 gct->failed_to_evac = rtsFalse;
125 } else {
126 *mutArrPtrsCard(a,m) = 0;
127 }
128 }
129
130 q = (StgPtr)&a->payload[a->ptrs];
131 if (p < q) {
132 for (; p < q; p++) {
133 evacuate((StgClosure**)p);
134 }
135 if (gct->failed_to_evac) {
136 any_failed = rtsTrue;
137 *mutArrPtrsCard(a,m) = 1;
138 gct->failed_to_evac = rtsFalse;
139 } else {
140 *mutArrPtrsCard(a,m) = 0;
141 }
142 }
143
144 gct->failed_to_evac = any_failed;
145 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
146 }
147
148 // scavenge only the marked areas of a MUT_ARR_PTRS
149 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
150 {
151 W_ m;
152 StgPtr p, q;
153 rtsBool any_failed;
154
155 any_failed = rtsFalse;
156 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
157 {
158 if (*mutArrPtrsCard(a,m) != 0) {
159 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
160 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
161 (StgPtr)&a->payload[a->ptrs]);
162 for (; p < q; p++) {
163 evacuate((StgClosure**)p);
164 }
165 if (gct->failed_to_evac) {
166 any_failed = rtsTrue;
167 gct->failed_to_evac = rtsFalse;
168 } else {
169 *mutArrPtrsCard(a,m) = 0;
170 }
171 }
172 }
173
174 gct->failed_to_evac = any_failed;
175 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
176 }
177
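/* Walk 'size' words starting at p under the control of a small bitmap: a
 * clear bit marks a pointer, which is evacuated; a set bit marks a
 * non-pointer word, which is skipped.
 */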
178 STATIC_INLINE StgPtr
179 scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
180 {
181 while (size > 0) {
182 if ((bitmap & 1) == 0) {
183 evacuate((StgClosure **)p);
184 }
185 p++;
186 bitmap = bitmap >> 1;
187 size--;
188 }
189 return p;
190 }
191
192 /* -----------------------------------------------------------------------------
193 Blocks of function args occur on the stack (at the top) and
194 in PAPs.
195 -------------------------------------------------------------------------- */
196
197 STATIC_INLINE StgPtr
198 scavenge_arg_block (const StgFunInfoTable *fun_info, StgClosure **args)
199 {
200 StgPtr p;
201 StgWord bitmap;
202 StgWord size;
203
204 p = (StgPtr)args;
205 switch (fun_info->f.fun_type) {
206 case ARG_GEN:
207 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
208 size = BITMAP_SIZE(fun_info->f.b.bitmap);
209 goto small_bitmap;
210 case ARG_GEN_BIG:
211 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
212 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
213 p += size;
214 break;
215 default:
216 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
217 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
218 small_bitmap:
219 p = scavenge_small_bitmap(p, size, bitmap);
220 break;
221 }
222 return p;
223 }
224
225 STATIC_INLINE GNUC_ATTR_HOT StgPtr
226 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
227 {
228 StgPtr p;
229 StgWord bitmap;
230 const StgFunInfoTable *fun_info;
231
232 fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(fun));
233 ASSERT(fun_info->i.type != PAP);
234 p = (StgPtr)payload;
235
236 switch (fun_info->f.fun_type) {
237 case ARG_GEN:
238 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
239 goto small_bitmap;
240 case ARG_GEN_BIG:
241 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
242 p += size;
243 break;
244 case ARG_BCO:
245 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
246 p += size;
247 break;
248 default:
249 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
250 small_bitmap:
251 p = scavenge_small_bitmap(p, size, bitmap);
252 break;
253 }
254 return p;
255 }
256
257 STATIC_INLINE GNUC_ATTR_HOT StgPtr
258 scavenge_PAP (StgPAP *pap)
259 {
260 evacuate(&pap->fun);
261 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
262 }
263
264 STATIC_INLINE StgPtr
265 scavenge_AP (StgAP *ap)
266 {
267 evacuate(&ap->fun);
268 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
269 }
270
271 /* -----------------------------------------------------------------------------
272 Scavenge SRTs
273 -------------------------------------------------------------------------- */
274
275 /* Similar to scavenge_large_bitmap(), but we don't write back the
276 * pointers we get back from evacuate().
277 */
278 static void
279 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
280 {
281 uint32_t i, j, size;
282 StgWord bitmap;
283 StgClosure **p;
284
285 size = (uint32_t)large_srt->l.size;
286 p = (StgClosure **)large_srt->srt;
287
288 for (i = 0; i < size / BITS_IN(W_); i++) {
289 bitmap = large_srt->l.bitmap[i];
290 // skip zero words: bitmaps can be very sparse, and this helps
291 // performance a lot in some cases.
292 if (bitmap != 0) {
293 for (j = 0; j < BITS_IN(W_); j++) {
294 if ((bitmap & 1) != 0) {
295 evacuate(p);
296 }
297 p++;
298 bitmap = bitmap >> 1;
299 }
300 } else {
301 p += BITS_IN(W_);
302 }
303 }
304 if (size % BITS_IN(W_) != 0) {
305 bitmap = large_srt->l.bitmap[i];
306 for (j = 0; j < size % BITS_IN(W_); j++) {
307 if ((bitmap & 1) != 0) {
308 evacuate(p);
309 }
310 p++;
311 bitmap = bitmap >> 1;
312 }
313 }
314 }
315
316 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
317 * srt field in the info table. That's ok, because we'll
318 * never dereference it.
319 */
320 STATIC_INLINE GNUC_ATTR_HOT void
321 scavenge_srt (StgClosure **srt, uint32_t srt_bitmap)
322 {
323 uint32_t bitmap;
324 StgClosure **p;
325
326 bitmap = srt_bitmap;
327 p = srt;
328
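    // An srt_bitmap of all ones (halfword -1) is the sentinel meaning the
    // SRT is too big for a small bitmap: 'srt' is then really an
    // StgLargeSRT carrying its own size and full-sized bitmap.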
329 if (bitmap == (StgHalfWord)(-1)) {
330 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
331 return;
332 }
333
334 while (bitmap != 0) {
335 if ((bitmap & 1) != 0) {
336 #if defined(COMPILING_WINDOWS_DLL)
337 // Special-case to handle references to closures hiding out in DLLs, since
338 // double indirections required to get at those. The code generator knows
339 // which is which when generating the SRT, so it stores the (indirect)
340 // reference to the DLL closure in the table by first adding one to it.
341 // We check for this here, and undo the addition before evacuating it.
342 //
343 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
344 // closure that's fixed at link-time, and no extra magic is required.
345 if ( (W_)(*srt) & 0x1 ) {
346 evacuate( (StgClosure**) ((W_) (*srt) & ~0x1));
347 } else {
348 evacuate(p);
349 }
350 #else
351 evacuate(p);
352 #endif
353 }
354 p++;
355 bitmap = bitmap >> 1;
356 }
357 }
358
359
360 STATIC_INLINE GNUC_ATTR_HOT void
361 scavenge_thunk_srt(const StgInfoTable *info)
362 {
363 StgThunkInfoTable *thunk_info;
364 uint32_t bitmap;
365
366 if (!major_gc) return;
367
368 thunk_info = itbl_to_thunk_itbl(info);
369 bitmap = thunk_info->i.srt_bitmap;
370 if (bitmap) {
371 // don't read srt_offset if bitmap==0, because it doesn't exist
372 // and so the memory might not be readable.
373 scavenge_srt((StgClosure **)GET_SRT(thunk_info), bitmap);
374 }
375 }
376
377 STATIC_INLINE GNUC_ATTR_HOT void
378 scavenge_fun_srt(const StgInfoTable *info)
379 {
380 StgFunInfoTable *fun_info;
381 uint32_t bitmap;
382
383 if (!major_gc) return;
384
385 fun_info = itbl_to_fun_itbl(info);
386 bitmap = fun_info->i.srt_bitmap;
387 if (bitmap) {
388 // don't read srt_offset if bitmap==0, because it doesn't exist
389 // and so the memory might not be readable.
390 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), bitmap);
391 }
392 }
393
394 /* -----------------------------------------------------------------------------
395 Scavenge a block from the given scan pointer up to bd->free.
396
397 evac_gen_no is set by the caller to be either zero (when scavenging a
398 generation < N) or G, where G is the generation being
399 scavenged.
400
401 We sometimes temporarily change evac_gen_no back to zero if we're
402 scavenging a mutable object where eager promotion isn't such a good
403 idea.
404 -------------------------------------------------------------------------- */
405
406 static GNUC_ATTR_HOT void
407 scavenge_block (bdescr *bd)
408 {
409 StgPtr p, q;
410 const StgInfoTable *info;
411 rtsBool saved_eager_promotion;
412 gen_workspace *ws;
413
414 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
415 bd->start, bd->gen_no, bd->u.scan);
416
417 gct->scan_bd = bd;
418 gct->evac_gen_no = bd->gen_no;
419 saved_eager_promotion = gct->eager_promotion;
420 gct->failed_to_evac = rtsFalse;
421
422 ws = &gct->gens[bd->gen->no];
423
424 p = bd->u.scan;
425
426 // we might be evacuating into the very object that we're
427 // scavenging, so we have to check the real bd->free pointer each
428 // time around the loop.
429 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
430
431 ASSERT(bd->link == NULL);
432 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
433 info = get_itbl((StgClosure *)p);
434
435 ASSERT(gct->thunk_selector_depth == 0);
436
437 q = p;
438 switch (info->type) {
439
440 case MVAR_CLEAN:
441 case MVAR_DIRTY:
442 {
443 StgMVar *mvar = ((StgMVar *)p);
444 gct->eager_promotion = rtsFalse;
445 evacuate((StgClosure **)&mvar->head);
446 evacuate((StgClosure **)&mvar->tail);
447 evacuate((StgClosure **)&mvar->value);
448 gct->eager_promotion = saved_eager_promotion;
449
450 if (gct->failed_to_evac) {
451 mvar->header.info = &stg_MVAR_DIRTY_info;
452 } else {
453 mvar->header.info = &stg_MVAR_CLEAN_info;
454 }
455 p += sizeofW(StgMVar);
456 break;
457 }
458
459 case TVAR:
460 {
461 StgTVar *tvar = ((StgTVar *)p);
462 gct->eager_promotion = rtsFalse;
463 evacuate((StgClosure **)&tvar->current_value);
464 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
465 gct->eager_promotion = saved_eager_promotion;
466
467 if (gct->failed_to_evac) {
468 tvar->header.info = &stg_TVAR_DIRTY_info;
469 } else {
470 tvar->header.info = &stg_TVAR_CLEAN_info;
471 }
472 p += sizeofW(StgTVar);
473 break;
474 }
475
476 case FUN_2_0:
477 scavenge_fun_srt(info);
478 evacuate(&((StgClosure *)p)->payload[1]);
479 evacuate(&((StgClosure *)p)->payload[0]);
480 p += sizeofW(StgHeader) + 2;
481 break;
482
483 case THUNK_2_0:
484 scavenge_thunk_srt(info);
485 evacuate(&((StgThunk *)p)->payload[1]);
486 evacuate(&((StgThunk *)p)->payload[0]);
487 p += sizeofW(StgThunk) + 2;
488 break;
489
490 case CONSTR_2_0:
491 evacuate(&((StgClosure *)p)->payload[1]);
492 evacuate(&((StgClosure *)p)->payload[0]);
493 p += sizeofW(StgHeader) + 2;
494 break;
495
496 case THUNK_1_0:
497 scavenge_thunk_srt(info);
498 evacuate(&((StgThunk *)p)->payload[0]);
499 p += sizeofW(StgThunk) + 1;
500 break;
501
502 case FUN_1_0:
503 scavenge_fun_srt(info);
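        // fall through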
504 case CONSTR_1_0:
505 evacuate(&((StgClosure *)p)->payload[0]);
506 p += sizeofW(StgHeader) + 1;
507 break;
508
509 case THUNK_0_1:
510 scavenge_thunk_srt(info);
511 p += sizeofW(StgThunk) + 1;
512 break;
513
514 case FUN_0_1:
515 scavenge_fun_srt(info);
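        // fall through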
516 case CONSTR_0_1:
517 p += sizeofW(StgHeader) + 1;
518 break;
519
520 case THUNK_0_2:
521 scavenge_thunk_srt(info);
522 p += sizeofW(StgThunk) + 2;
523 break;
524
525 case FUN_0_2:
526 scavenge_fun_srt(info);
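        // fall through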
527 case CONSTR_0_2:
528 p += sizeofW(StgHeader) + 2;
529 break;
530
531 case THUNK_1_1:
532 scavenge_thunk_srt(info);
533 evacuate(&((StgThunk *)p)->payload[0]);
534 p += sizeofW(StgThunk) + 2;
535 break;
536
537 case FUN_1_1:
538 scavenge_fun_srt(info);
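        // fall through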
539 case CONSTR_1_1:
540 evacuate(&((StgClosure *)p)->payload[0]);
541 p += sizeofW(StgHeader) + 2;
542 break;
543
544 case FUN:
545 scavenge_fun_srt(info);
546 goto gen_obj;
547
548 case THUNK:
549 {
550 StgPtr end;
551
552 scavenge_thunk_srt(info);
553 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
554 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
555 evacuate((StgClosure **)p);
556 }
557 p += info->layout.payload.nptrs;
558 break;
559 }
560
561 gen_obj:
562 case CONSTR:
563 case CONSTR_NOCAF:
564 case WEAK:
565 case PRIM:
566 {
567 StgPtr end;
568
569 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
570 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
571 evacuate((StgClosure **)p);
572 }
573 p += info->layout.payload.nptrs;
574 break;
575 }
576
577 case BCO: {
578 StgBCO *bco = (StgBCO *)p;
579 evacuate((StgClosure **)&bco->instrs);
580 evacuate((StgClosure **)&bco->literals);
581 evacuate((StgClosure **)&bco->ptrs);
582 p += bco_sizeW(bco);
583 break;
584 }
585
586 case BLACKHOLE:
587 evacuate(&((StgInd *)p)->indirectee);
588 p += sizeofW(StgInd);
589 break;
590
591 case MUT_VAR_CLEAN:
592 case MUT_VAR_DIRTY:
593 gct->eager_promotion = rtsFalse;
594 evacuate(&((StgMutVar *)p)->var);
595 gct->eager_promotion = saved_eager_promotion;
596
597 if (gct->failed_to_evac) {
598 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
599 } else {
600 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
601 }
602 p += sizeofW(StgMutVar);
603 break;
604
605 case BLOCKING_QUEUE:
606 {
607 StgBlockingQueue *bq = (StgBlockingQueue *)p;
608
609 gct->eager_promotion = rtsFalse;
610 evacuate(&bq->bh);
611 evacuate((StgClosure**)&bq->owner);
612 evacuate((StgClosure**)&bq->queue);
613 evacuate((StgClosure**)&bq->link);
614 gct->eager_promotion = saved_eager_promotion;
615
616 if (gct->failed_to_evac) {
617 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
618 } else {
619 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
620 }
621 p += sizeofW(StgBlockingQueue);
622 break;
623 }
624
625 case THUNK_SELECTOR:
626 {
627 StgSelector *s = (StgSelector *)p;
628 evacuate(&s->selectee);
629 p += THUNK_SELECTOR_sizeW();
630 break;
631 }
632
633 // A chunk of stack saved in a heap object
634 case AP_STACK:
635 {
636 StgAP_STACK *ap = (StgAP_STACK *)p;
637
638 evacuate(&ap->fun);
639 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
640 p = (StgPtr)ap->payload + ap->size;
641 break;
642 }
643
644 case PAP:
645 p = scavenge_PAP((StgPAP *)p);
646 break;
647
648 case AP:
649 p = scavenge_AP((StgAP *)p);
650 break;
651
652 case ARR_WORDS:
653 // nothing to follow
654 p += arr_words_sizeW((StgArrBytes *)p);
655 break;
656
657 case MUT_ARR_PTRS_CLEAN:
658 case MUT_ARR_PTRS_DIRTY:
659 {
660 // We don't eagerly promote objects pointed to by a mutable
661 // array, but if we find the array only points to objects in
662 // the same or an older generation, we mark it "clean" and
663 // avoid traversing it during minor GCs.
664 gct->eager_promotion = rtsFalse;
665
666 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
667
668 if (gct->failed_to_evac) {
669 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
670 } else {
671 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
672 }
673
674 gct->eager_promotion = saved_eager_promotion;
675 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
676 break;
677 }
678
679 case MUT_ARR_PTRS_FROZEN:
680 case MUT_ARR_PTRS_FROZEN0:
681 // follow everything
682 {
683 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
684
685 // If we're going to put this object on the mutable list, then
686 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
687 if (gct->failed_to_evac) {
688 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
689 } else {
690 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
691 }
692 break;
693 }
694
695 case SMALL_MUT_ARR_PTRS_CLEAN:
696 case SMALL_MUT_ARR_PTRS_DIRTY:
697 // follow everything
698 {
699 StgPtr next;
700
701 // We don't eagerly promote objects pointed to by a mutable
702 // array, but if we find the array only points to objects in
703 // the same or an older generation, we mark it "clean" and
704 // avoid traversing it during minor GCs.
705 gct->eager_promotion = rtsFalse;
706 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
707 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
708 evacuate((StgClosure **)p);
709 }
710 gct->eager_promotion = saved_eager_promotion;
711
712 if (gct->failed_to_evac) {
713 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
714 } else {
715 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
716 }
717
718 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
719 break;
720 }
721
722 case SMALL_MUT_ARR_PTRS_FROZEN:
723 case SMALL_MUT_ARR_PTRS_FROZEN0:
724 // follow everything
725 {
726 StgPtr next;
727
728 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
729 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
730 evacuate((StgClosure **)p);
731 }
732
733 // If we're going to put this object on the mutable list, then
734 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
735 if (gct->failed_to_evac) {
736 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
737 } else {
738 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
739 }
740 break;
741 }
742
743 case TSO:
744 {
745 scavengeTSO((StgTSO *)p);
746 p += sizeofW(StgTSO);
747 break;
748 }
749
750 case STACK:
751 {
752 StgStack *stack = (StgStack*)p;
753
754 gct->eager_promotion = rtsFalse;
755
756 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
757 stack->dirty = gct->failed_to_evac;
758 p += stack_sizeW(stack);
759
760 gct->eager_promotion = saved_eager_promotion;
761 break;
762 }
763
764 case MUT_PRIM:
765 {
766 StgPtr end;
767
768 gct->eager_promotion = rtsFalse;
769
770 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
771 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
772 evacuate((StgClosure **)p);
773 }
774 p += info->layout.payload.nptrs;
775
776 gct->eager_promotion = saved_eager_promotion;
777 gct->failed_to_evac = rtsTrue; // mutable
778 break;
779 }
780
781 case TREC_CHUNK:
782 {
783 StgWord i;
784 StgTRecChunk *tc = ((StgTRecChunk *) p);
785 TRecEntry *e = &(tc -> entries[0]);
786 gct->eager_promotion = rtsFalse;
787 evacuate((StgClosure **)&tc->prev_chunk);
788 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
789 evacuate((StgClosure **)&e->tvar);
790 evacuate((StgClosure **)&e->expected_value);
791 evacuate((StgClosure **)&e->new_value);
792 }
793 gct->eager_promotion = saved_eager_promotion;
794 gct->failed_to_evac = rtsTrue; // mutable
795 p += sizeofW(StgTRecChunk);
796 break;
797 }
798
799 case COMPACT_NFDATA:
800 // CompactNFData blocks live in compact lists, which we don't
801 // scavenge, because there is nothing to scavenge in them,
802 // so we should never see them here
803 barf("scavenge: found unexpected Compact structure");
804 break;
805
806 default:
807 barf("scavenge: unimplemented/strange closure type %d @ %p",
808 info->type, p);
809 }
810
811 /*
812 * We need to record the current object on the mutable list if
813 * (a) It is actually mutable, or
814 * (b) It contains pointers to a younger generation.
815 * Case (b) arises if we didn't manage to promote everything that
816 * the current object points to into the current generation.
817 */
818 if (gct->failed_to_evac) {
819 gct->failed_to_evac = rtsFalse;
820 if (bd->gen_no > 0) {
821 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
822 }
823 }
824 }
825
826 if (p > bd->free) {
827 gct->copied += ws->todo_free - bd->free;
828 bd->free = p;
829 }
830
831 debugTrace(DEBUG_gc, " scavenged %ld bytes",
832 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
833
834 // update stats: this is a block that has been scavenged
835 gct->scanned += bd->free - bd->u.scan;
836 bd->u.scan = bd->free;
837
838 if (bd != ws->todo_bd) {
839 // we're not going to evac any more objects into
840 // this block, so push it now.
841 push_scanned_block(bd, ws);
842 }
843
844 gct->scan_bd = NULL;
845 }
846 /* -----------------------------------------------------------------------------
847 Scavenge everything on the mark stack.
848
849 This is slightly different from scavenge():
850 - we don't walk linearly through the objects, so the scavenger
851 doesn't need to advance the pointer on to the next object.
852 -------------------------------------------------------------------------- */
853
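/* Objects evacuated into a generation that is being compacted are marked
 * in place rather than copied, and are pushed onto the mark stack (see
 * Evac.c); this function finds and scavenges them via that stack instead
 * of by walking to-space blocks.
 */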
854 static void
855 scavenge_mark_stack(void)
856 {
857 StgPtr p, q;
858 const StgInfoTable *info;
859 rtsBool saved_eager_promotion;
860
861 gct->evac_gen_no = oldest_gen->no;
862 saved_eager_promotion = gct->eager_promotion;
863
864 while ((p = pop_mark_stack())) {
865
866 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
867 info = get_itbl((StgClosure *)p);
868
869 q = p;
870 switch (info->type) {
871
872 case MVAR_CLEAN:
873 case MVAR_DIRTY:
874 {
875 StgMVar *mvar = ((StgMVar *)p);
876 gct->eager_promotion = rtsFalse;
877 evacuate((StgClosure **)&mvar->head);
878 evacuate((StgClosure **)&mvar->tail);
879 evacuate((StgClosure **)&mvar->value);
880 gct->eager_promotion = saved_eager_promotion;
881
882 if (gct->failed_to_evac) {
883 mvar->header.info = &stg_MVAR_DIRTY_info;
884 } else {
885 mvar->header.info = &stg_MVAR_CLEAN_info;
886 }
887 break;
888 }
889
890 case TVAR:
891 {
892 StgTVar *tvar = ((StgTVar *)p);
893 gct->eager_promotion = rtsFalse;
894 evacuate((StgClosure **)&tvar->current_value);
895 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
896 gct->eager_promotion = saved_eager_promotion;
897
898 if (gct->failed_to_evac) {
899 tvar->header.info = &stg_TVAR_DIRTY_info;
900 } else {
901 tvar->header.info = &stg_TVAR_CLEAN_info;
902 }
903 break;
904 }
905
906 case FUN_2_0:
907 scavenge_fun_srt(info);
908 evacuate(&((StgClosure *)p)->payload[1]);
909 evacuate(&((StgClosure *)p)->payload[0]);
910 break;
911
912 case THUNK_2_0:
913 scavenge_thunk_srt(info);
914 evacuate(&((StgThunk *)p)->payload[1]);
915 evacuate(&((StgThunk *)p)->payload[0]);
916 break;
917
918 case CONSTR_2_0:
919 evacuate(&((StgClosure *)p)->payload[1]);
920 evacuate(&((StgClosure *)p)->payload[0]);
921 break;
922
923 case FUN_1_0:
924 case FUN_1_1:
925 scavenge_fun_srt(info);
926 evacuate(&((StgClosure *)p)->payload[0]);
927 break;
928
929 case THUNK_1_0:
930 case THUNK_1_1:
931 scavenge_thunk_srt(info);
932 evacuate(&((StgThunk *)p)->payload[0]);
933 break;
934
935 case CONSTR_1_0:
936 case CONSTR_1_1:
937 evacuate(&((StgClosure *)p)->payload[0]);
938 break;
939
940 case FUN_0_1:
941 case FUN_0_2:
942 scavenge_fun_srt(info);
943 break;
944
945 case THUNK_0_1:
946 case THUNK_0_2:
947 scavenge_thunk_srt(info);
948 break;
949
950 case CONSTR_0_1:
951 case CONSTR_0_2:
952 break;
953
954 case FUN:
955 scavenge_fun_srt(info);
956 goto gen_obj;
957
958 case THUNK:
959 {
960 StgPtr end;
961
962 scavenge_thunk_srt(info);
963 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
964 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
965 evacuate((StgClosure **)p);
966 }
967 break;
968 }
969
970 gen_obj:
971 case CONSTR:
972 case CONSTR_NOCAF:
973 case WEAK:
974 case PRIM:
975 {
976 StgPtr end;
977
978 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
979 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
980 evacuate((StgClosure **)p);
981 }
982 break;
983 }
984
985 case BCO: {
986 StgBCO *bco = (StgBCO *)p;
987 evacuate((StgClosure **)&bco->instrs);
988 evacuate((StgClosure **)&bco->literals);
989 evacuate((StgClosure **)&bco->ptrs);
990 break;
991 }
992
993 case IND:
994 case BLACKHOLE:
995 evacuate(&((StgInd *)p)->indirectee);
996 break;
997
998 case MUT_VAR_CLEAN:
999 case MUT_VAR_DIRTY: {
1000 gct->eager_promotion = rtsFalse;
1001 evacuate(&((StgMutVar *)p)->var);
1002 gct->eager_promotion = saved_eager_promotion;
1003
1004 if (gct->failed_to_evac) {
1005 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1006 } else {
1007 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1008 }
1009 break;
1010 }
1011
1012 case BLOCKING_QUEUE:
1013 {
1014 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1015
1016 gct->eager_promotion = rtsFalse;
1017 evacuate(&bq->bh);
1018 evacuate((StgClosure**)&bq->owner);
1019 evacuate((StgClosure**)&bq->queue);
1020 evacuate((StgClosure**)&bq->link);
1021 gct->eager_promotion = saved_eager_promotion;
1022
1023 if (gct->failed_to_evac) {
1024 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1025 } else {
1026 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1027 }
1028 break;
1029 }
1030
1031 case ARR_WORDS:
1032 break;
1033
1034 case THUNK_SELECTOR:
1035 {
1036 StgSelector *s = (StgSelector *)p;
1037 evacuate(&s->selectee);
1038 break;
1039 }
1040
1041 // A chunk of stack saved in a heap object
1042 case AP_STACK:
1043 {
1044 StgAP_STACK *ap = (StgAP_STACK *)p;
1045
1046 evacuate(&ap->fun);
1047 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1048 break;
1049 }
1050
1051 case PAP:
1052 scavenge_PAP((StgPAP *)p);
1053 break;
1054
1055 case AP:
1056 scavenge_AP((StgAP *)p);
1057 break;
1058
1059 case MUT_ARR_PTRS_CLEAN:
1060 case MUT_ARR_PTRS_DIRTY:
1061 // follow everything
1062 {
1063 // We don't eagerly promote objects pointed to by a mutable
1064 // array, but if we find the array only points to objects in
1065 // the same or an older generation, we mark it "clean" and
1066 // avoid traversing it during minor GCs.
1067 gct->eager_promotion = rtsFalse;
1068
1069 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1070
1071 if (gct->failed_to_evac) {
1072 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1073 } else {
1074 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1075 }
1076
1077 gct->eager_promotion = saved_eager_promotion;
1078 gct->failed_to_evac = rtsTrue; // mutable anyhow.
1079 break;
1080 }
1081
1082 case MUT_ARR_PTRS_FROZEN:
1083 case MUT_ARR_PTRS_FROZEN0:
1084 // follow everything
1085 {
1086 StgPtr q = p;
1087
1088 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1089
1090 // If we're going to put this object on the mutable list, then
1091 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1092 if (gct->failed_to_evac) {
1093 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1094 } else {
1095 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1096 }
1097 break;
1098 }
1099
1100 case SMALL_MUT_ARR_PTRS_CLEAN:
1101 case SMALL_MUT_ARR_PTRS_DIRTY:
1102 // follow everything
1103 {
1104 StgPtr next;
1105 rtsBool saved_eager;
1106
1107 // We don't eagerly promote objects pointed to by a mutable
1108 // array, but if we find the array only points to objects in
1109 // the same or an older generation, we mark it "clean" and
1110 // avoid traversing it during minor GCs.
1111 saved_eager = gct->eager_promotion;
1112 gct->eager_promotion = rtsFalse;
1113 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1114 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1115 evacuate((StgClosure **)p);
1116 }
1117 gct->eager_promotion = saved_eager;
1118
1119 if (gct->failed_to_evac) {
1120 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1121 } else {
1122 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1123 }
1124
1125 gct->failed_to_evac = rtsTrue; // mutable anyhow.
1126 break;
1127 }
1128
1129 case SMALL_MUT_ARR_PTRS_FROZEN:
1130 case SMALL_MUT_ARR_PTRS_FROZEN0:
1131 // follow everything
1132 {
1133 StgPtr next, q = p;
1134
1135 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1136 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1137 evacuate((StgClosure **)p);
1138 }
1139
1140 // If we're going to put this object on the mutable list, then
1141 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1142 if (gct->failed_to_evac) {
1143 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1144 } else {
1145 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1146 }
1147 break;
1148 }
1149
1150 case TSO:
1151 {
1152 scavengeTSO((StgTSO*)p);
1153 break;
1154 }
1155
1156 case STACK:
1157 {
1158 StgStack *stack = (StgStack*)p;
1159
1160 gct->eager_promotion = rtsFalse;
1161
1162 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1163 stack->dirty = gct->failed_to_evac;
1164
1165 gct->eager_promotion = saved_eager_promotion;
1166 break;
1167 }
1168
1169 case MUT_PRIM:
1170 {
1171 StgPtr end;
1172
1173 gct->eager_promotion = rtsFalse;
1174
1175 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1176 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1177 evacuate((StgClosure **)p);
1178 }
1179
1180 gct->eager_promotion = saved_eager_promotion;
1181 gct->failed_to_evac = rtsTrue; // mutable
1182 break;
1183 }
1184
1185 case TREC_CHUNK:
1186 {
1187 StgWord i;
1188 StgTRecChunk *tc = ((StgTRecChunk *) p);
1189 TRecEntry *e = &(tc -> entries[0]);
1190 gct->eager_promotion = rtsFalse;
1191 evacuate((StgClosure **)&tc->prev_chunk);
1192 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1193 evacuate((StgClosure **)&e->tvar);
1194 evacuate((StgClosure **)&e->expected_value);
1195 evacuate((StgClosure **)&e->new_value);
1196 }
1197 gct->eager_promotion = saved_eager_promotion;
1198 gct->failed_to_evac = rtsTrue; // mutable
1199 break;
1200 }
1201
1202 default:
1203 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1204 info->type, p);
1205 }
1206
1207 if (gct->failed_to_evac) {
1208 gct->failed_to_evac = rtsFalse;
1209 if (gct->evac_gen_no) {
1210 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1211 }
1212 }
1213 } // while (p = pop_mark_stack())
1214 }
1215
1216 /* -----------------------------------------------------------------------------
1217 Scavenge one object.
1218
1219 This is used for objects that are temporarily marked as mutable
1220 because they contain old-to-new generation pointers. Only certain
1221 objects can have this property.
1222 -------------------------------------------------------------------------- */
1223
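// Returns rtsTrue if the object could not be completely promoted (i.e.
// gct->failed_to_evac was set while scavenging it), in which case the
// caller must keep the object on a mutable list.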
1224 static rtsBool
1225 scavenge_one(StgPtr p)
1226 {
1227 const StgInfoTable *info;
1228 rtsBool no_luck;
1229 rtsBool saved_eager_promotion;
1230
1231 saved_eager_promotion = gct->eager_promotion;
1232
1233 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1234 info = get_itbl((StgClosure *)p);
1235
1236 switch (info->type) {
1237
1238 case MVAR_CLEAN:
1239 case MVAR_DIRTY:
1240 {
1241 StgMVar *mvar = ((StgMVar *)p);
1242 gct->eager_promotion = rtsFalse;
1243 evacuate((StgClosure **)&mvar->head);
1244 evacuate((StgClosure **)&mvar->tail);
1245 evacuate((StgClosure **)&mvar->value);
1246 gct->eager_promotion = saved_eager_promotion;
1247
1248 if (gct->failed_to_evac) {
1249 mvar->header.info = &stg_MVAR_DIRTY_info;
1250 } else {
1251 mvar->header.info = &stg_MVAR_CLEAN_info;
1252 }
1253 break;
1254 }
1255
1256 case TVAR:
1257 {
1258 StgTVar *tvar = ((StgTVar *)p);
1259 gct->eager_promotion = rtsFalse;
1260 evacuate((StgClosure **)&tvar->current_value);
1261 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1262 gct->eager_promotion = saved_eager_promotion;
1263
1264 if (gct->failed_to_evac) {
1265 tvar->header.info = &stg_TVAR_DIRTY_info;
1266 } else {
1267 tvar->header.info = &stg_TVAR_CLEAN_info;
1268 }
1269 break;
1270 }
1271
1272 case THUNK:
1273 case THUNK_1_0:
1274 case THUNK_0_1:
1275 case THUNK_1_1:
1276 case THUNK_0_2:
1277 case THUNK_2_0:
1278 {
1279 StgPtr q, end;
1280
1281 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1282 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1283 evacuate((StgClosure **)q);
1284 }
1285 break;
1286 }
1287
1288 case FUN:
1289 case FUN_1_0: // hardly worth specialising these guys
1290 case FUN_0_1:
1291 case FUN_1_1:
1292 case FUN_0_2:
1293 case FUN_2_0:
1294 case CONSTR:
1295 case CONSTR_NOCAF:
1296 case CONSTR_1_0:
1297 case CONSTR_0_1:
1298 case CONSTR_1_1:
1299 case CONSTR_0_2:
1300 case CONSTR_2_0:
1301 case PRIM:
1302 {
1303 StgPtr q, end;
1304
1305 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1306 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1307 evacuate((StgClosure **)q);
1308 }
1309 break;
1310 }
1311
1312 case WEAK:
1313 // This WEAK object will not be considered by tidyWeakList during this
1314 // collection because it is in a generation >= N, but it is on the
1315 // mutable list so we must evacuate all of its pointers because some
1316 // of them may point into a younger generation.
1317 scavengeLiveWeak((StgWeak *)p);
1318 break;
1319
1320 case MUT_VAR_CLEAN:
1321 case MUT_VAR_DIRTY: {
1322 StgPtr q = p;
1323
1324 gct->eager_promotion = rtsFalse;
1325 evacuate(&((StgMutVar *)p)->var);
1326 gct->eager_promotion = saved_eager_promotion;
1327
1328 if (gct->failed_to_evac) {
1329 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1330 } else {
1331 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1332 }
1333 break;
1334 }
1335
1336 case BLOCKING_QUEUE:
1337 {
1338 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1339
1340 gct->eager_promotion = rtsFalse;
1341 evacuate(&bq->bh);
1342 evacuate((StgClosure**)&bq->owner);
1343 evacuate((StgClosure**)&bq->queue);
1344 evacuate((StgClosure**)&bq->link);
1345 gct->eager_promotion = saved_eager_promotion;
1346
1347 if (gct->failed_to_evac) {
1348 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1349 } else {
1350 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1351 }
1352 break;
1353 }
1354
1355 case THUNK_SELECTOR:
1356 {
1357 StgSelector *s = (StgSelector *)p;
1358 evacuate(&s->selectee);
1359 break;
1360 }
1361
1362 case AP_STACK:
1363 {
1364 StgAP_STACK *ap = (StgAP_STACK *)p;
1365
1366 evacuate(&ap->fun);
1367 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1368 p = (StgPtr)ap->payload + ap->size;
1369 break;
1370 }
1371
1372 case PAP:
1373 p = scavenge_PAP((StgPAP *)p);
1374 break;
1375
1376 case AP:
1377 p = scavenge_AP((StgAP *)p);
1378 break;
1379
1380 case ARR_WORDS:
1381 // nothing to follow
1382 break;
1383
1384 case MUT_ARR_PTRS_CLEAN:
1385 case MUT_ARR_PTRS_DIRTY:
1386 {
1387 // We don't eagerly promote objects pointed to by a mutable
1388 // array, but if we find the array only points to objects in
1389 // the same or an older generation, we mark it "clean" and
1390 // avoid traversing it during minor GCs.
1391 gct->eager_promotion = rtsFalse;
1392
1393 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1394
1395 if (gct->failed_to_evac) {
1396 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1397 } else {
1398 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1399 }
1400
1401 gct->eager_promotion = saved_eager_promotion;
1402 gct->failed_to_evac = rtsTrue;
1403 break;
1404 }
1405
1406 case MUT_ARR_PTRS_FROZEN:
1407 case MUT_ARR_PTRS_FROZEN0:
1408 {
1409 // follow everything
1410 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1411
1412 // If we're going to put this object on the mutable list, then
1413 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1414 if (gct->failed_to_evac) {
1415 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1416 } else {
1417 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1418 }
1419 break;
1420 }
1421
1422 case SMALL_MUT_ARR_PTRS_CLEAN:
1423 case SMALL_MUT_ARR_PTRS_DIRTY:
1424 {
1425 StgPtr next, q;
1426 rtsBool saved_eager;
1427
1428 // We don't eagerly promote objects pointed to by a mutable
1429 // array, but if we find the array only points to objects in
1430 // the same or an older generation, we mark it "clean" and
1431 // avoid traversing it during minor GCs.
1432 saved_eager = gct->eager_promotion;
1433 gct->eager_promotion = rtsFalse;
1434 q = p;
1435 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1436 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1437 evacuate((StgClosure **)p);
1438 }
1439 gct->eager_promotion = saved_eager;
1440
1441 if (gct->failed_to_evac) {
1442 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1443 } else {
1444 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1445 }
1446
1447 gct->failed_to_evac = rtsTrue;
1448 break;
1449 }
1450
1451 case SMALL_MUT_ARR_PTRS_FROZEN:
1452 case SMALL_MUT_ARR_PTRS_FROZEN0:
1453 {
1454 // follow everything
1455 StgPtr next, q=p;
1456
1457 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1458 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1459 evacuate((StgClosure **)p);
1460 }
1461
1462 // If we're going to put this object on the mutable list, then
1463 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1464 if (gct->failed_to_evac) {
1465 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1466 } else {
1467 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1468 }
1469 break;
1470 }
1471
1472 case TSO:
1473 {
1474 scavengeTSO((StgTSO*)p);
1475 break;
1476 }
1477
1478 case STACK:
1479 {
1480 StgStack *stack = (StgStack*)p;
1481
1482 gct->eager_promotion = rtsFalse;
1483
1484 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1485 stack->dirty = gct->failed_to_evac;
1486
1487 gct->eager_promotion = saved_eager_promotion;
1488 break;
1489 }
1490
1491 case MUT_PRIM:
1492 {
1493 StgPtr end;
1494
1495 gct->eager_promotion = rtsFalse;
1496
1497 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1498 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1499 evacuate((StgClosure **)p);
1500 }
1501
1502 gct->eager_promotion = saved_eager_promotion;
1503 gct->failed_to_evac = rtsTrue; // mutable
1504 break;
1505
1506 }
1507
1508 case TREC_CHUNK:
1509 {
1510 StgWord i;
1511 StgTRecChunk *tc = ((StgTRecChunk *) p);
1512 TRecEntry *e = &(tc -> entries[0]);
1513 gct->eager_promotion = rtsFalse;
1514 evacuate((StgClosure **)&tc->prev_chunk);
1515 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1516 evacuate((StgClosure **)&e->tvar);
1517 evacuate((StgClosure **)&e->expected_value);
1518 evacuate((StgClosure **)&e->new_value);
1519 }
1520 gct->eager_promotion = saved_eager_promotion;
1521 gct->failed_to_evac = rtsTrue; // mutable
1522 break;
1523 }
1524
1525 case IND:
1526 // IND can happen, for example, when the interpreter allocates
1527 // a gigantic AP closure (more than one block), which ends up
1528 // on the large-object list and then gets updated. See #3424.
1529 case BLACKHOLE:
1530 case IND_STATIC:
1531 evacuate(&((StgInd *)p)->indirectee);
1532
1533 #if 0 && defined(DEBUG)
1534 if (RtsFlags.DebugFlags.gc)
1535 /* Debugging code to print out the size of the thing we just
1536 * promoted
1537 */
1538 {
1539 StgPtr start = gen->scan;
1540 bdescr *start_bd = gen->scan_bd;
1541 StgWord size = 0;
1542 scavenge(&gen);
1543 if (start_bd != gen->scan_bd) {
1544 size += (P_)BLOCK_ROUND_UP(start) - start;
1545 start_bd = start_bd->link;
1546 while (start_bd != gen->scan_bd) {
1547 size += BLOCK_SIZE_W;
1548 start_bd = start_bd->link;
1549 }
1550 size += gen->scan -
1551 (P_)BLOCK_ROUND_DOWN(gen->scan);
1552 } else {
1553 size = gen->scan - start;
1554 }
1555 debugBelch("evac IND: %ld bytes", size * sizeof(W_));
1556 }
1557 #endif
1558 break;
1559
1560 default:
1561 barf("scavenge_one: strange object %d", (int)(info->type));
1562 }
1563
1564 no_luck = gct->failed_to_evac;
1565 gct->failed_to_evac = rtsFalse;
1566 return (no_luck);
1567 }
1568
1569 /* -----------------------------------------------------------------------------
1570 Scavenging mutable lists.
1571
1572 We treat the mutable list of each generation > N (i.e. all the
1573 generations older than the one being collected) as roots. We also
1574 remove non-mutable objects from the mutable list at this point.
1575 -------------------------------------------------------------------------- */
1576
1577 static void
1578 scavenge_mutable_list(bdescr *bd, generation *gen)
1579 {
1580 StgPtr p, q;
1581 uint32_t gen_no;
1582
1583 gen_no = gen->no;
1584 gct->evac_gen_no = gen_no;
1585 for (; bd != NULL; bd = bd->link) {
1586 for (q = bd->start; q < bd->free; q++) {
1587 p = (StgPtr)*q;
1588 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1589
1590 #ifdef DEBUG
1591 switch (get_itbl((StgClosure *)p)->type) {
1592 case MUT_VAR_CLEAN:
1593 // can happen due to concurrent writeMutVars
1594 case MUT_VAR_DIRTY:
1595 mutlist_MUTVARS++; break;
1596 case MUT_ARR_PTRS_CLEAN:
1597 case MUT_ARR_PTRS_DIRTY:
1598 case MUT_ARR_PTRS_FROZEN:
1599 case MUT_ARR_PTRS_FROZEN0:
1600 mutlist_MUTARRS++; break;
1601 case MVAR_CLEAN:
1602 barf("MVAR_CLEAN on mutable list");
1603 case MVAR_DIRTY:
1604 mutlist_MVARS++; break;
1605 case TVAR:
1606 mutlist_TVAR++; break;
1607 case TREC_CHUNK:
1608 mutlist_TREC_CHUNK++; break;
1609 case MUT_PRIM:
1610 if (((StgClosure*)p)->header.info == &stg_TVAR_WATCH_QUEUE_info)
1611 mutlist_TVAR_WATCH_QUEUE++;
1612 else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
1613 mutlist_TREC_HEADER++;
1614 else if (((StgClosure*)p)->header.info == &stg_ATOMIC_INVARIANT_info)
1615 mutlist_ATOMIC_INVARIANT++;
1616 else if (((StgClosure*)p)->header.info == &stg_INVARIANT_CHECK_QUEUE_info)
1617 mutlist_INVARIANT_CHECK_QUEUE++;
1618 else
1619 mutlist_OTHERS++;
1620 break;
1621 default:
1622 mutlist_OTHERS++; break;
1623 }
1624 #endif
1625
1626 // Check whether this object is "clean", that is, it
1627 // definitely doesn't point into a young generation.
1628 // Clean objects don't need to be scavenged. Some clean
1629 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1630 // list at all; others, such as MUT_ARR_PTRS, are
1631 // always on the mutable list.
1632 //
1633 switch (get_itbl((StgClosure *)p)->type) {
1634 case MUT_ARR_PTRS_CLEAN:
1635 recordMutableGen_GC((StgClosure *)p,gen_no);
1636 continue;
1637 case MUT_ARR_PTRS_DIRTY:
1638 {
1639 rtsBool saved_eager_promotion;
1640 saved_eager_promotion = gct->eager_promotion;
1641 gct->eager_promotion = rtsFalse;
1642
1643 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1644
1645 if (gct->failed_to_evac) {
1646 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1647 } else {
1648 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1649 }
1650
1651 gct->eager_promotion = saved_eager_promotion;
1652 gct->failed_to_evac = rtsFalse;
1653 recordMutableGen_GC((StgClosure *)p,gen_no);
1654 continue;
1655 }
1656 default:
1657 ;
1658 }
1659
1660 if (scavenge_one(p)) {
1661 // didn't manage to promote everything, so put the
1662 // object back on the list.
1663 recordMutableGen_GC((StgClosure *)p,gen_no);
1664 }
1665 }
1666 }
1667 }
1668
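/* N (defined in GC.c) is the oldest generation being collected in this GC;
 * generations above N do not move, so their mutable lists are scavenged in
 * place as roots rather than evacuated.
 */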
1669 void
1670 scavenge_capability_mut_lists (Capability *cap)
1671 {
1672 uint32_t g;
1673
1674 /* Mutable lists from each generation > N
1675 * we want to *scavenge* these roots, not evacuate them: they're not
1676 * going to move in this GC.
1677 * Also do them in reverse generation order, for the usual reason:
1678 * namely to reduce the likelihood of spurious old->new pointers.
1679 */
1680 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1681 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1682 freeChain_sync(cap->saved_mut_lists[g]);
1683 cap->saved_mut_lists[g] = NULL;
1684 }
1685 }
1686
1687 /* -----------------------------------------------------------------------------
1688 Scavenging the static objects.
1689
1690 Evacuated static objects are linked onto gct->static_objects; here we
1691 walk that list, scavenging each object's pointers (always into the oldest
1692 generation) and moving it onto gct->scavenged_static_objects.
1693 -------------------------------------------------------------------------- */
1694
1695 static void
1696 scavenge_static(void)
1697 {
1698 StgClosure *flagged_p, *p;
1699 const StgInfoTable *info;
1700
1701 debugTrace(DEBUG_gc, "scavenging static objects");
1702
1703 /* Always evacuate straight to the oldest generation for static
1704 * objects */
1705 gct->evac_gen_no = oldest_gen->no;
1706
1707 /* keep going until we've scavenged all the objects on the linked
1708 list... */
1709
1710 while (1) {
1711
1712 /* get the next static object from the list. Remember, there might
1713 * be more stuff on this list after each evacuation...
1714 * (each evacuation may add more objects to gct->static_objects)
1715 */
1716 flagged_p = gct->static_objects;
1717 if (flagged_p == END_OF_STATIC_OBJECT_LIST) {
1718 break;
1719 }
1720 p = UNTAG_STATIC_LIST_PTR(flagged_p);
1721
1722 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1723 info = get_itbl(p);
1724 // make sure the info pointer is into text space
1725
1726 /* Take this object *off* the static_objects list,
1727 * and put it on the scavenged_static_objects list.
1728 */
1729 gct->static_objects = *STATIC_LINK(info,p);
1730 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1731 gct->scavenged_static_objects = flagged_p;
1732
1733 switch (info -> type) {
1734
1735 case IND_STATIC:
1736 {
1737 StgInd *ind = (StgInd *)p;
1738 evacuate(&ind->indirectee);
1739
1740 /* might fail to evacuate it, in which case we have to pop it
1741 * back on the mutable list of the oldest generation. We
1742 * leave it *on* the scavenged_static_objects list, though,
1743 * in case we visit this object again.
1744 */
1745 if (gct->failed_to_evac) {
1746 gct->failed_to_evac = rtsFalse;
1747 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1748 }
1749 break;
1750 }
1751
1752 case THUNK_STATIC:
1753 scavenge_thunk_srt(info);
1754 break;
1755
1756 case FUN_STATIC:
1757 scavenge_fun_srt(info);
1758 break;
1759
1760 case CONSTR:
1761 case CONSTR_NOCAF:
1762 case CONSTR_1_0:
1763 case CONSTR_0_1:
1764 case CONSTR_2_0:
1765 case CONSTR_1_1:
1766 case CONSTR_0_2:
1767 {
1768 StgPtr q, next;
1769
1770 next = (P_)p->payload + info->layout.payload.ptrs;
1771 // evacuate the pointers
1772 for (q = (P_)p->payload; q < next; q++) {
1773 evacuate((StgClosure **)q);
1774 }
1775 break;
1776 }
1777
1778 default:
1779 barf("scavenge_static: strange closure %d", (int)(info->type));
1780 }
1781
1782 ASSERT(gct->failed_to_evac == rtsFalse);
1783 }
1784 }
1785
1786 /* -----------------------------------------------------------------------------
1787 scavenge a chunk of memory described by a bitmap
1788 -------------------------------------------------------------------------- */
1789
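// Same convention as scavenge_small_bitmap(): a clear bit marks a pointer
// to be evacuated, a set bit marks a non-pointer word.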
1790 static void
1791 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
1792 {
1793 uint32_t i, j, b;
1794 StgWord bitmap;
1795
1796 b = 0;
1797
1798 for (i = 0; i < size; b++) {
1799 bitmap = large_bitmap->bitmap[b];
1800 j = stg_min(size-i, BITS_IN(W_));
1801 i += j;
1802 for (; j > 0; j--, p++) {
1803 if ((bitmap & 1) == 0) {
1804 evacuate((StgClosure **)p);
1805 }
1806 bitmap = bitmap >> 1;
1807 }
1808 }
1809 }
1810
1811
1812 /* -----------------------------------------------------------------------------
1813 scavenge_stack walks over a section of stack and evacuates all the
1814 objects pointed to by it. We can use the same code for walking
1815 AP_STACK_UPDs, since these are just sections of copied stack.
1816 -------------------------------------------------------------------------- */
1817
1818 static void
1819 scavenge_stack(StgPtr p, StgPtr stack_end)
1820 {
1821 const StgRetInfoTable* info;
1822 StgWord bitmap;
1823 StgWord size;
1824
1825 /*
1826 * Each time around this loop, we are looking at a chunk of stack
1827 * that starts with an activation record.
1828 */
1829
1830 while (p < stack_end) {
1831 info = get_ret_itbl((StgClosure *)p);
1832
1833 switch (info->i.type) {
1834
1835 case UPDATE_FRAME:
1836 // Note [upd-black-hole]
1837 // In SMP, we can get update frames that point to indirections
1838 // when two threads evaluate the same thunk. We do attempt to
1839 // discover this situation in threadPaused(), but it's
1840 // possible that the following sequence occurs:
1841 //
1842 // A B
1843 // enter T
1844 // enter T
1845 // blackhole T
1846 // update T
1847 // GC
1848 //
1849 // Now T is an indirection, and the update frame is already
1850 // marked on A's stack, so we won't traverse it again in
1851 // threadPaused(). We could traverse the whole stack again
1852 // before GC, but that seems like overkill.
1853 //
1854 // Scavenging this update frame as normal would be disastrous;
1855 // the updatee would end up pointing to the value. So we
1856 // check whether the value after evacuation is a BLACKHOLE,
1857 // and if not, we change the update frame to an stg_enter
1858 // frame that simply returns the value. Hence, blackholing is
1859 // compulsory (otherwise we would have to check for thunks
1860 // too).
1861 //
1862 // One slight hiccup is that the THUNK_SELECTOR machinery can
1863 // overwrite the updatee with an IND. In parallel GC, this
1864 // could even be happening concurrently, so we can't check for
1865 // the IND. Fortunately if we assume that blackholing is
1866 // happening (either lazy or eager), then we can be sure that
1867 // the updatee is never a THUNK_SELECTOR and we're ok.
1868 // NB. this is a new invariant: blackholing is not optional.
1869 {
1870 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1871 StgClosure *v;
1872
1873 evacuate(&frame->updatee);
1874 v = frame->updatee;
1875 if (GET_CLOSURE_TAG(v) != 0 ||
1876 (get_itbl(v)->type != BLACKHOLE)) {
1877 // blackholing is compulsory, see above.
1878 frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
1879 }
1880 ASSERT(v->header.info != &stg_TSO_info);
1881 p += sizeofW(StgUpdateFrame);
1882 continue;
1883 }
1884
1885 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1886 case CATCH_STM_FRAME:
1887 case CATCH_RETRY_FRAME:
1888 case ATOMICALLY_FRAME:
1889 case UNDERFLOW_FRAME:
1890 case STOP_FRAME:
1891 case CATCH_FRAME:
1892 case RET_SMALL:
1893 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1894 size = BITMAP_SIZE(info->i.layout.bitmap);
1895 // NOTE: the payload starts immediately after the info-ptr, we
1896 // don't have an StgHeader in the same sense as a heap closure.
1897 p++;
1898 p = scavenge_small_bitmap(p, size, bitmap);
1899
1900 follow_srt:
1901 if (major_gc)
1902 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
1903 continue;
1904
1905 case RET_BCO: {
1906 StgBCO *bco;
1907 StgWord size;
1908
1909 p++;
1910 evacuate((StgClosure **)p);
1911 bco = (StgBCO *)*p;
1912 p++;
1913 size = BCO_BITMAP_SIZE(bco);
1914 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1915 p += size;
1916 continue;
1917 }
1918
1919 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1920 case RET_BIG:
1921 {
1922 StgWord size;
1923
1924 size = GET_LARGE_BITMAP(&info->i)->size;
1925 p++;
1926 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1927 p += size;
1928 // and don't forget to follow the SRT
1929 goto follow_srt;
1930 }
1931
1932 case RET_FUN:
1933 {
1934 StgRetFun *ret_fun = (StgRetFun *)p;
1935 const StgFunInfoTable *fun_info;
1936
1937 evacuate(&ret_fun->fun);
1938 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1939 p = scavenge_arg_block(fun_info, ret_fun->payload);
1940 goto follow_srt;
1941 }
1942
1943 default:
1944 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1945 }
1946 }
1947 }
1948
1949 /*-----------------------------------------------------------------------------
1950 scavenge the large object list.
1951
1952 evac_gen_no: similar games are played with evac_gen_no as in
1953 scavenge_block() - see the comment at the top of scavenge_block(). Most
1954 large objects are (repeatedly) mutable, so most of the time evac_gen_no
1955 will be zero.
1956 --------------------------------------------------------------------------- */
1957
1958 static void
1959 scavenge_large (gen_workspace *ws)
1960 {
1961 bdescr *bd;
1962 StgPtr p;
1963
1964 gct->evac_gen_no = ws->gen->no;
1965
1966 bd = ws->todo_large_objects;
1967
1968 for (; bd != NULL; bd = ws->todo_large_objects) {
1969
1970 // take this object *off* the large objects list and put it on
1971 // the scavenged large objects list. This is so that we can
1972 // treat todo_large_objects as a stack and push new objects on
1973 // the front when evacuating.
1974 ws->todo_large_objects = bd->link;
1975
1976 ACQUIRE_SPIN_LOCK(&ws->gen->sync);
1977 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
1978 ws->gen->n_scavenged_large_blocks += bd->blocks;
1979 RELEASE_SPIN_LOCK(&ws->gen->sync);
1980
1981 p = bd->start;
1982 if (scavenge_one(p)) {
1983 if (ws->gen->no > 0) {
1984 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
1985 }
1986 }
1987
1988 // stats
1989 gct->scanned += closure_sizeW((StgClosure*)p);
1990 }
1991 }
1992
1993 /* ----------------------------------------------------------------------------
1994 Look for work to do.
1995
1996 We look for the oldest gen that has either a todo block that can
1997 be scanned, or a block of work on the global queue that we can
1998 scan.
1999
2000 It is important to take work from the *oldest* generation that
2001 has work available, because that minimizes the likelihood of
2002 evacuating objects into a young generation when they should have
2003 been eagerly promoted. This really does make a difference (the
2004 cacheprof benchmark is one that is affected).
2005
2006 We also want to scan the todo block if possible before grabbing
2007 work from the global queue, the reason being that we don't want to
2008 steal work from the global queue and starve other threads if there
2009 is other work we can usefully be doing.
2010 ------------------------------------------------------------------------- */
2011
2012 static rtsBool
2013 scavenge_find_work (void)
2014 {
2015 int g;
2016 gen_workspace *ws;
2017 rtsBool did_something, did_anything;
2018 bdescr *bd;
2019
2020 gct->scav_find_work++;
2021
2022 did_anything = rtsFalse;
2023
2024 loop:
2025 did_something = rtsFalse;
2026 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2027 ws = &gct->gens[g];
2028
2029 gct->scan_bd = NULL;
2030
2031 // If we have a scan block with some work to do,
2032 // scavenge everything up to the free pointer.
2033 if (ws->todo_bd->u.scan < ws->todo_free)
2034 {
2035 scavenge_block(ws->todo_bd);
2036 did_something = rtsTrue;
2037 break;
2038 }
2039
2040 // If we have any large objects to scavenge, do them now.
2041 if (ws->todo_large_objects) {
2042 scavenge_large(ws);
2043 did_something = rtsTrue;
2044 break;
2045 }
2046
2047 if ((bd = grab_local_todo_block(ws)) != NULL) {
2048 scavenge_block(bd);
2049 did_something = rtsTrue;
2050 break;
2051 }
2052 }
2053
2054 if (did_something) {
2055 did_anything = rtsTrue;
2056 goto loop;
2057 }
2058
2059 #if defined(THREADED_RTS)
2060 if (work_stealing) {
2061 // look for work to steal
2062 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2063 if ((bd = steal_todo_block(g)) != NULL) {
2064 scavenge_block(bd);
2065 did_something = rtsTrue;
2066 break;
2067 }
2068 }
2069
2070 if (did_something) {
2071 did_anything = rtsTrue;
2072 goto loop;
2073 }
2074 }
2075 #endif
2076
2077 // only return when there is no more work to do
2078
2079 return did_anything;
2080 }
2081
2082 /* ----------------------------------------------------------------------------
2083 Scavenge until we can't find anything more to scavenge.
2084 ------------------------------------------------------------------------- */
2085
2086 void
2087 scavenge_loop(void)
2088 {
2089 rtsBool work_to_do;
2090
2091 loop:
2092 work_to_do = rtsFalse;
2093
2094 // scavenge static objects
2095 if (major_gc && gct->static_objects != END_OF_STATIC_OBJECT_LIST) {
2096 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
2097 scavenge_static();
2098 }
2099
2100 // scavenge objects in compacted generation
2101 if (mark_stack_bd != NULL && !mark_stack_empty()) {
2102 scavenge_mark_stack();
2103 work_to_do = rtsTrue;
2104 }
2105
2106 // Order is important here: we want to deal in full blocks as
2107 // much as possible, so go for global work in preference to
2108 // local work. Only if all the global work has been exhausted
2109 // do we start scavenging the fragments of blocks in the local
2110 // workspaces.
2111 if (scavenge_find_work()) goto loop;
2112
2113 if (work_to_do) goto loop;
2114 }