Add /* fallthrough */ to fix -Wimplicit-fallthrough warning
[ghc.git] / rts / sm / Scav.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "Storage.h"
18 #include "GC.h"
19 #include "GCThread.h"
20 #include "GCUtils.h"
21 #include "Compact.h"
22 #include "MarkStack.h"
23 #include "Evac.h"
24 #include "Scav.h"
25 #include "Apply.h"
26 #include "Trace.h"
27 #include "Sanity.h"
28 #include "Capability.h"
29 #include "LdvProfile.h"
30 #include "Hash.h"
31
32 #include "sm/MarkWeak.h"
33
34 static void scavenge_stack (StgPtr p, StgPtr stack_end);
35
36 static void scavenge_large_bitmap (StgPtr p,
37 StgLargeBitmap *large_bitmap,
38 StgWord size );
39
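// Note (added): in a threaded RTS this file is also built in a parallel
// (PARALLEL_GC) variant. The defines below rename the single-threaded
// copies of the entry points with a "1" suffix so that both versions can
// be linked together and selected between at runtime.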
40 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
41 # define evacuate(a) evacuate1(a)
42 # define evacuate_BLACKHOLE(a) evacuate_BLACKHOLE1(a)
43 # define scavenge_loop(a) scavenge_loop1(a)
44 # define scavenge_block(a) scavenge_block1(a)
45 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
46 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
47 #endif
48
49 /* -----------------------------------------------------------------------------
50 Scavenge a TSO.
51 -------------------------------------------------------------------------- */
52
53 static void
54 scavengeTSO (StgTSO *tso)
55 {
56 bool saved_eager;
57
58 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
59
60 // update the pointer from the InCall.
61 if (tso->bound != NULL) {
62 // NB. We can't just set tso->bound->tso = tso, because this
63 // might be an invalid copy the TSO resulting from multiple
64 // threads evacuating the TSO simultaneously (see
65 // Evac.c:copy_tag()). Calling evacuate() on this pointer
66 // will ensure that we update it to point to the correct copy.
67 evacuate((StgClosure **)&tso->bound->tso);
68 }
69
70 saved_eager = gct->eager_promotion;
71 gct->eager_promotion = false;
72
73 evacuate((StgClosure **)&tso->blocked_exceptions);
74 evacuate((StgClosure **)&tso->bq);
75
76 // scavenge current transaction record
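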
77 evacuate((StgClosure **)&tso->trec);
78
79 evacuate((StgClosure **)&tso->stackobj);
80
81 evacuate((StgClosure **)&tso->_link);
82 if ( tso->why_blocked == BlockedOnMVar
83 || tso->why_blocked == BlockedOnMVarRead
84 || tso->why_blocked == BlockedOnBlackHole
85 || tso->why_blocked == BlockedOnMsgThrowTo
86 || tso->why_blocked == NotBlocked
87 ) {
88 evacuate(&tso->block_info.closure);
89 }
90 #if defined(THREADED_RTS)
91 // in the THREADED_RTS, block_info.closure must always point to a
92 // valid closure, because we assume this in throwTo(). In the
93 // non-threaded RTS it might be a FD (for
94 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
95 else {
96 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
97 }
98 #endif
99
100 tso->dirty = gct->failed_to_evac;
101
102 gct->eager_promotion = saved_eager;
103 }
104
105 /* ----------------------------------------------------------------------------
106 Scavenging compact objects
107 ------------------------------------------------------------------------- */
108
109 typedef struct {
110 // We must save gct when calling mapHashTable(), which is compiled
111 // without GCThread.h and so uses a different calling convention.
112 // See also GC.c:mark_root where we do a similar thing.
113 gc_thread *saved_gct;
114 HashTable *newHash;
115 } MapHashData;
116
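// Fix up one entry of a compact region's hash table: the key is an
// ordinary heap pointer which may have moved, so evacuate it and insert
// the (possibly updated) address into the replacement table.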
117 static void
118 evacuate_hash_entry(MapHashData *dat, StgWord key, const void *value)
119 {
120 StgClosure *p = (StgClosure*)key;
121 #if defined(THREADED_RTS)
122 gc_thread *old_gct = gct;
123 #endif
124
125 SET_GCT(dat->saved_gct);
126 evacuate(&p);
127 insertHashTable(dat->newHash, (StgWord)p, value);
128 SET_GCT(old_gct);
129 }
130
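// The payload of a compact region is fully evaluated and self-contained,
// so the GC does not traverse it. All that needs scavenging here is the
// optional hash table (present while the region is still being built),
// whose keys are pointers to ordinary heap objects.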
131 static void
132 scavenge_compact(StgCompactNFData *str)
133 {
134 bool saved_eager;
135 saved_eager = gct->eager_promotion;
136 gct->eager_promotion = false;
137
138 if (str->hash) {
139 MapHashData dat;
140 dat.saved_gct = gct;
141 HashTable *newHash = allocHashTable();
142 dat.newHash = newHash;
143 mapHashTable(str->hash, (void*)&dat, (MapHashFn)evacuate_hash_entry);
144 freeHashTable(str->hash, NULL);
145 str->hash = newHash;
146 }
147
148 debugTrace(DEBUG_compact,
149 "compact alive @%p, gen %d, %" FMT_Word " bytes",
150 str, Bdescr((P_)str)->gen_no, str->totalW * sizeof(W_));
151
152 gct->eager_promotion = saved_eager;
153 if (gct->failed_to_evac) {
154 ((StgClosure *)str)->header.info = &stg_COMPACT_NFDATA_DIRTY_info;
155 } else {
156 ((StgClosure *)str)->header.info = &stg_COMPACT_NFDATA_CLEAN_info;
157 }
158 }
159
160 /* -----------------------------------------------------------------------------
161 Mutable arrays of pointers
162 -------------------------------------------------------------------------- */
163
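// Scavenge a MUT_ARR_PTRS card by card. Each card covers
// 2^MUT_ARR_PTRS_CARD_BITS elements; after evacuating a card's elements
// we record in the card table whether anything in it still points into a
// younger generation, so that later GCs can use
// scavenge_mut_arr_ptrs_marked() to rescan only the dirty cards.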
164 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
165 {
166 W_ m;
167 bool any_failed;
168 StgPtr p, q;
169
170 any_failed = false;
171 p = (StgPtr)&a->payload[0];
172 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
173 {
174 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
175 for (; p < q; p++) {
176 evacuate((StgClosure**)p);
177 }
178 if (gct->failed_to_evac) {
179 any_failed = true;
180 *mutArrPtrsCard(a,m) = 1;
181 gct->failed_to_evac = false;
182 } else {
183 *mutArrPtrsCard(a,m) = 0;
184 }
185 }
186
187 q = (StgPtr)&a->payload[a->ptrs];
188 if (p < q) {
189 for (; p < q; p++) {
190 evacuate((StgClosure**)p);
191 }
192 if (gct->failed_to_evac) {
193 any_failed = true;
194 *mutArrPtrsCard(a,m) = 1;
195 gct->failed_to_evac = false;
196 } else {
197 *mutArrPtrsCard(a,m) = 0;
198 }
199 }
200
201 gct->failed_to_evac = any_failed;
202 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
203 }
204
205 // scavenge only the marked areas of a MUT_ARR_PTRS
206 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
207 {
208 W_ m;
209 StgPtr p, q;
210 bool any_failed;
211
212 any_failed = false;
213 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
214 {
215 if (*mutArrPtrsCard(a,m) != 0) {
216 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
217 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
218 (StgPtr)&a->payload[a->ptrs]);
219 for (; p < q; p++) {
220 evacuate((StgClosure**)p);
221 }
222 if (gct->failed_to_evac) {
223 any_failed = true;
224 gct->failed_to_evac = false;
225 } else {
226 *mutArrPtrsCard(a,m) = 0;
227 }
228 }
229 }
230
231 gct->failed_to_evac = any_failed;
232 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
233 }
234
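// Walk a small (single-word) bitmap over the payload at p: a clear bit
// marks a word holding a heap pointer, which is evacuated; a set bit
// marks a non-pointer word, which is skipped.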
235 STATIC_INLINE StgPtr
236 scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
237 {
238 while (size > 0) {
239 if ((bitmap & 1) == 0) {
240 evacuate((StgClosure **)p);
241 }
242 p++;
243 bitmap = bitmap >> 1;
244 size--;
245 }
246 return p;
247 }
248
249 /* -----------------------------------------------------------------------------
250 Blocks of function args occur on the stack (at the top) and
251 in PAPs.
252 -------------------------------------------------------------------------- */
253
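// The shape of an argument block is given by the function's info table:
// ARG_GEN carries a small inline bitmap, ARG_GEN_BIG refers to a large
// bitmap, and the remaining standard calling conventions are looked up
// in stg_arg_bitmaps.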
254 STATIC_INLINE StgPtr
255 scavenge_arg_block (const StgFunInfoTable *fun_info, StgClosure **args)
256 {
257 StgPtr p;
258 StgWord bitmap;
259 StgWord size;
260
261 p = (StgPtr)args;
262 switch (fun_info->f.fun_type) {
263 case ARG_GEN:
264 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
265 size = BITMAP_SIZE(fun_info->f.b.bitmap);
266 goto small_bitmap;
267 case ARG_GEN_BIG:
268 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
269 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
270 p += size;
271 break;
272 default:
273 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
274 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
275 small_bitmap:
276 p = scavenge_small_bitmap(p, size, bitmap);
277 break;
278 }
279 return p;
280 }
281
282 STATIC_INLINE GNUC_ATTR_HOT StgPtr
283 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
284 {
285 StgPtr p;
286 StgWord bitmap;
287 const StgFunInfoTable *fun_info;
288
289 fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(fun));
290 ASSERT(fun_info->i.type != PAP);
291 p = (StgPtr)payload;
292
293 switch (fun_info->f.fun_type) {
294 case ARG_GEN:
295 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
296 goto small_bitmap;
297 case ARG_GEN_BIG:
298 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
299 p += size;
300 break;
301 case ARG_BCO:
302 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
303 p += size;
304 break;
305 default:
306 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
307 small_bitmap:
308 p = scavenge_small_bitmap(p, size, bitmap);
309 break;
310 }
311 return p;
312 }
313
314 STATIC_INLINE GNUC_ATTR_HOT StgPtr
315 scavenge_PAP (StgPAP *pap)
316 {
317 evacuate(&pap->fun);
318 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
319 }
320
321 STATIC_INLINE StgPtr
322 scavenge_AP (StgAP *ap)
323 {
324 evacuate(&ap->fun);
325 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
326 }
327
328 /* -----------------------------------------------------------------------------
329 Scavenge SRTs
330 -------------------------------------------------------------------------- */
331
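// An SRT (static reference table) records the static closures (e.g. CAFs)
// reachable from a thunk's or function's code. Static objects are only
// collected during a major GC, so SRTs are ignored in minor collections.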
332 STATIC_INLINE GNUC_ATTR_HOT void
333 scavenge_thunk_srt(const StgInfoTable *info)
334 {
335 StgThunkInfoTable *thunk_info;
336
337 if (!major_gc) return;
338
339 thunk_info = itbl_to_thunk_itbl(info);
340 if (thunk_info->i.srt) {
341 StgClosure *srt = (StgClosure*)GET_SRT(thunk_info);
342 evacuate(&srt);
343 }
344 }
345
346 STATIC_INLINE GNUC_ATTR_HOT void
347 scavenge_fun_srt(const StgInfoTable *info)
348 {
349 StgFunInfoTable *fun_info;
350
351 if (!major_gc) return;
352
353 fun_info = itbl_to_fun_itbl(info);
354 if (fun_info->i.srt) {
355 StgClosure *srt = (StgClosure*)GET_FUN_SRT(fun_info);
356 evacuate(&srt);
357 }
358 }
359
360 /* -----------------------------------------------------------------------------
361 Scavenge a block from the given scan pointer up to bd->free.
362
363 evac_gen_no is set by the caller to be either zero (for a step in a
364 generation < N) or G where G is the generation of the step being
365 scavenged.
366
367 We sometimes temporarily change evac_gen_no back to zero if we're
368 scavenging a mutable object where eager promotion isn't such a good
369 idea.
370 -------------------------------------------------------------------------- */
371
372 static GNUC_ATTR_HOT void
373 scavenge_block (bdescr *bd)
374 {
375 StgPtr p, q;
376 const StgInfoTable *info;
377 bool saved_eager_promotion;
378 gen_workspace *ws;
379
380 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
381 bd->start, bd->gen_no, bd->u.scan);
382
383 gct->scan_bd = bd;
384 gct->evac_gen_no = bd->gen_no;
385 saved_eager_promotion = gct->eager_promotion;
386 gct->failed_to_evac = false;
387
388 ws = &gct->gens[bd->gen->no];
389
390 p = bd->u.scan;
391
392 // we might be evacuating into the very object that we're
393 // scavenging, so we have to check the real bd->free pointer each
394 // time around the loop.
395 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
396
397 ASSERT(bd->link == NULL);
398 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
399 info = get_itbl((StgClosure *)p);
400
401 ASSERT(gct->thunk_selector_depth == 0);
402
403 q = p;
404 switch (info->type) {
405
406 case MVAR_CLEAN:
407 case MVAR_DIRTY:
408 {
409 StgMVar *mvar = ((StgMVar *)p);
410 gct->eager_promotion = false;
411 evacuate((StgClosure **)&mvar->head);
412 evacuate((StgClosure **)&mvar->tail);
413 evacuate((StgClosure **)&mvar->value);
414 gct->eager_promotion = saved_eager_promotion;
415
416 if (gct->failed_to_evac) {
417 mvar->header.info = &stg_MVAR_DIRTY_info;
418 } else {
419 mvar->header.info = &stg_MVAR_CLEAN_info;
420 }
421 p += sizeofW(StgMVar);
422 break;
423 }
424
425 case TVAR:
426 {
427 StgTVar *tvar = ((StgTVar *)p);
428 gct->eager_promotion = false;
429 evacuate((StgClosure **)&tvar->current_value);
430 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
431 gct->eager_promotion = saved_eager_promotion;
432
433 if (gct->failed_to_evac) {
434 tvar->header.info = &stg_TVAR_DIRTY_info;
435 } else {
436 tvar->header.info = &stg_TVAR_CLEAN_info;
437 }
438 p += sizeofW(StgTVar);
439 break;
440 }
441
442 case FUN_2_0:
443 scavenge_fun_srt(info);
444 evacuate(&((StgClosure *)p)->payload[1]);
445 evacuate(&((StgClosure *)p)->payload[0]);
446 p += sizeofW(StgHeader) + 2;
447 break;
448
449 case THUNK_2_0:
450 scavenge_thunk_srt(info);
451 evacuate(&((StgThunk *)p)->payload[1]);
452 evacuate(&((StgThunk *)p)->payload[0]);
453 p += sizeofW(StgThunk) + 2;
454 break;
455
456 case CONSTR_2_0:
457 evacuate(&((StgClosure *)p)->payload[1]);
458 evacuate(&((StgClosure *)p)->payload[0]);
459 p += sizeofW(StgHeader) + 2;
460 break;
461
462 case THUNK_1_0:
463 scavenge_thunk_srt(info);
464 evacuate(&((StgThunk *)p)->payload[0]);
465 p += sizeofW(StgThunk) + 1;
466 break;
467
468 case FUN_1_0:
469 scavenge_fun_srt(info);
470 /* fallthrough */
471 case CONSTR_1_0:
472 evacuate(&((StgClosure *)p)->payload[0]);
473 p += sizeofW(StgHeader) + 1;
474 break;
475
476 case THUNK_0_1:
477 scavenge_thunk_srt(info);
478 p += sizeofW(StgThunk) + 1;
479 break;
480
481 case FUN_0_1:
482 scavenge_fun_srt(info);
483 /* fallthrough */
484 case CONSTR_0_1:
485 p += sizeofW(StgHeader) + 1;
486 break;
487
488 case THUNK_0_2:
489 scavenge_thunk_srt(info);
490 p += sizeofW(StgThunk) + 2;
491 break;
492
493 case FUN_0_2:
494 scavenge_fun_srt(info);
495 /* fallthrough */
496 case CONSTR_0_2:
497 p += sizeofW(StgHeader) + 2;
498 break;
499
500 case THUNK_1_1:
501 scavenge_thunk_srt(info);
502 evacuate(&((StgThunk *)p)->payload[0]);
503 p += sizeofW(StgThunk) + 2;
504 break;
505
506 case FUN_1_1:
507 scavenge_fun_srt(info);
508 /* fallthrough */
509 case CONSTR_1_1:
510 evacuate(&((StgClosure *)p)->payload[0]);
511 p += sizeofW(StgHeader) + 2;
512 break;
513
514 case FUN:
515 scavenge_fun_srt(info);
516 goto gen_obj;
517
518 case THUNK:
519 {
520 StgPtr end;
521
522 scavenge_thunk_srt(info);
523 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
524 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
525 evacuate((StgClosure **)p);
526 }
527 p += info->layout.payload.nptrs;
528 break;
529 }
530
531 gen_obj:
532 case CONSTR:
533 case CONSTR_NOCAF:
534 case WEAK:
535 case PRIM:
536 {
537 StgPtr end;
538
539 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
540 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
541 evacuate((StgClosure **)p);
542 }
543 p += info->layout.payload.nptrs;
544 break;
545 }
546
547 case BCO: {
548 StgBCO *bco = (StgBCO *)p;
549 evacuate((StgClosure **)&bco->instrs);
550 evacuate((StgClosure **)&bco->literals);
551 evacuate((StgClosure **)&bco->ptrs);
552 p += bco_sizeW(bco);
553 break;
554 }
555
556 case BLACKHOLE:
557 evacuate(&((StgInd *)p)->indirectee);
558 p += sizeofW(StgInd);
559 break;
560
561 case MUT_VAR_CLEAN:
562 case MUT_VAR_DIRTY:
563 gct->eager_promotion = false;
564 evacuate(&((StgMutVar *)p)->var);
565 gct->eager_promotion = saved_eager_promotion;
566
567 if (gct->failed_to_evac) {
568 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
569 } else {
570 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
571 }
572 p += sizeofW(StgMutVar);
573 break;
574
575 case BLOCKING_QUEUE:
576 {
577 StgBlockingQueue *bq = (StgBlockingQueue *)p;
578
579 gct->eager_promotion = false;
580 evacuate(&bq->bh);
581 evacuate((StgClosure**)&bq->owner);
582 evacuate((StgClosure**)&bq->queue);
583 evacuate((StgClosure**)&bq->link);
584 gct->eager_promotion = saved_eager_promotion;
585
586 if (gct->failed_to_evac) {
587 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
588 } else {
589 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
590 }
591 p += sizeofW(StgBlockingQueue);
592 break;
593 }
594
595 case THUNK_SELECTOR:
596 {
597 StgSelector *s = (StgSelector *)p;
598 evacuate(&s->selectee);
599 p += THUNK_SELECTOR_sizeW();
600 break;
601 }
602
603 // A chunk of stack saved in a heap object
604 case AP_STACK:
605 {
606 StgAP_STACK *ap = (StgAP_STACK *)p;
607
608 evacuate(&ap->fun);
609 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
610 p = (StgPtr)ap->payload + ap->size;
611 break;
612 }
613
614 case PAP:
615 p = scavenge_PAP((StgPAP *)p);
616 break;
617
618 case AP:
619 p = scavenge_AP((StgAP *)p);
620 break;
621
622 case ARR_WORDS:
623 // nothing to follow
624 p += arr_words_sizeW((StgArrBytes *)p);
625 break;
626
627 case MUT_ARR_PTRS_CLEAN:
628 case MUT_ARR_PTRS_DIRTY:
629 {
630 // We don't eagerly promote objects pointed to by a mutable
631 // array, but if we find the array only points to objects in
632 // the same or an older generation, we mark it "clean" and
633 // avoid traversing it during minor GCs.
634 gct->eager_promotion = false;
635
636 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
637
638 if (gct->failed_to_evac) {
639 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
640 } else {
641 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
642 }
643
644 gct->eager_promotion = saved_eager_promotion;
645 gct->failed_to_evac = true; // always put it on the mutable list.
646 break;
647 }
648
649 case MUT_ARR_PTRS_FROZEN:
650 case MUT_ARR_PTRS_FROZEN0:
651 // follow everything
652 {
653 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
654
655 // If we're going to put this object on the mutable list, then
656 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
657 if (gct->failed_to_evac) {
658 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
659 } else {
660 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
661 }
662 break;
663 }
664
665 case SMALL_MUT_ARR_PTRS_CLEAN:
666 case SMALL_MUT_ARR_PTRS_DIRTY:
667 // follow everything
668 {
669 StgPtr next;
670
671 // We don't eagerly promote objects pointed to by a mutable
672 // array, but if we find the array only points to objects in
673 // the same or an older generation, we mark it "clean" and
674 // avoid traversing it during minor GCs.
675 gct->eager_promotion = false;
676 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
677 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
678 evacuate((StgClosure **)p);
679 }
680 gct->eager_promotion = saved_eager_promotion;
681
682 if (gct->failed_to_evac) {
683 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
684 } else {
685 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
686 }
687
688 gct->failed_to_evac = true; // always put it on the mutable list.
689 break;
690 }
691
692 case SMALL_MUT_ARR_PTRS_FROZEN:
693 case SMALL_MUT_ARR_PTRS_FROZEN0:
694 // follow everything
695 {
696 StgPtr next;
697
698 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
699 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
700 evacuate((StgClosure **)p);
701 }
702
703 // If we're going to put this object on the mutable list, then
704 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
705 if (gct->failed_to_evac) {
706 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
707 } else {
708 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
709 }
710 break;
711 }
712
713 case TSO:
714 {
715 scavengeTSO((StgTSO *)p);
716 p += sizeofW(StgTSO);
717 break;
718 }
719
720 case STACK:
721 {
722 StgStack *stack = (StgStack*)p;
723
724 gct->eager_promotion = false;
725
726 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
727 stack->dirty = gct->failed_to_evac;
728 p += stack_sizeW(stack);
729
730 gct->eager_promotion = saved_eager_promotion;
731 break;
732 }
733
734 case MUT_PRIM:
735 {
736 StgPtr end;
737
738 gct->eager_promotion = false;
739
740 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
741 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
742 evacuate((StgClosure **)p);
743 }
744 p += info->layout.payload.nptrs;
745
746 gct->eager_promotion = saved_eager_promotion;
747 gct->failed_to_evac = true; // mutable
748 break;
749 }
750
751 case TREC_CHUNK:
752 {
753 StgWord i;
754 StgTRecChunk *tc = ((StgTRecChunk *) p);
755 TRecEntry *e = &(tc -> entries[0]);
756 gct->eager_promotion = false;
757 evacuate((StgClosure **)&tc->prev_chunk);
758 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
759 evacuate((StgClosure **)&e->tvar);
760 evacuate((StgClosure **)&e->expected_value);
761 evacuate((StgClosure **)&e->new_value);
762 }
763 gct->eager_promotion = saved_eager_promotion;
764 gct->failed_to_evac = true; // mutable
765 p += sizeofW(StgTRecChunk);
766 break;
767 }
768
769 default:
770 barf("scavenge: unimplemented/strange closure type %d @ %p",
771 info->type, p);
772 }
773
774 /*
775 * We need to record the current object on the mutable list if
776 * (a) It is actually mutable, or
777 * (b) It contains pointers to a younger generation.
778 * Case (b) arises if we didn't manage to promote everything that
779 * the current object points to into the current generation.
780 */
781 if (gct->failed_to_evac) {
782 gct->failed_to_evac = false;
783 if (bd->gen_no > 0) {
784 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
785 }
786 }
787 }
788
789 if (p > bd->free) {
790 gct->copied += ws->todo_free - bd->free;
791 bd->free = p;
792 }
793
794 debugTrace(DEBUG_gc, " scavenged %lu bytes",
795 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
796
797 // update stats: this is a block that has been scavenged
798 gct->scanned += bd->free - bd->u.scan;
799 bd->u.scan = bd->free;
800
801 if (bd != ws->todo_bd) {
802 // we're not going to evac any more objects into
803 // this block, so push it now.
804 push_scanned_block(bd, ws);
805 }
806
807 gct->scan_bd = NULL;
808 }
809 /* -----------------------------------------------------------------------------
810 Scavenge everything on the mark stack.
811
812 This is slightly different from scavenge():
813 - we don't walk linearly through the objects, so the scavenger
814 doesn't need to advance the pointer on to the next object.
815 -------------------------------------------------------------------------- */
816
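// The mark stack is only in use when the oldest generation is being
// compacted: evacuated objects in that generation are pushed onto the
// stack rather than copied, and we pop and scavenge them in place here.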
817 static void
818 scavenge_mark_stack(void)
819 {
820 StgPtr p, q;
821 const StgInfoTable *info;
822 bool saved_eager_promotion;
823
824 gct->evac_gen_no = oldest_gen->no;
825 saved_eager_promotion = gct->eager_promotion;
826
827 while ((p = pop_mark_stack())) {
828
829 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
830 info = get_itbl((StgClosure *)p);
831
832 q = p;
833 switch (info->type) {
834
835 case MVAR_CLEAN:
836 case MVAR_DIRTY:
837 {
838 StgMVar *mvar = ((StgMVar *)p);
839 gct->eager_promotion = false;
840 evacuate((StgClosure **)&mvar->head);
841 evacuate((StgClosure **)&mvar->tail);
842 evacuate((StgClosure **)&mvar->value);
843 gct->eager_promotion = saved_eager_promotion;
844
845 if (gct->failed_to_evac) {
846 mvar->header.info = &stg_MVAR_DIRTY_info;
847 } else {
848 mvar->header.info = &stg_MVAR_CLEAN_info;
849 }
850 break;
851 }
852
853 case TVAR:
854 {
855 StgTVar *tvar = ((StgTVar *)p);
856 gct->eager_promotion = false;
857 evacuate((StgClosure **)&tvar->current_value);
858 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
859 gct->eager_promotion = saved_eager_promotion;
860
861 if (gct->failed_to_evac) {
862 tvar->header.info = &stg_TVAR_DIRTY_info;
863 } else {
864 tvar->header.info = &stg_TVAR_CLEAN_info;
865 }
866 break;
867 }
868
869 case FUN_2_0:
870 scavenge_fun_srt(info);
871 evacuate(&((StgClosure *)p)->payload[1]);
872 evacuate(&((StgClosure *)p)->payload[0]);
873 break;
874
875 case THUNK_2_0:
876 scavenge_thunk_srt(info);
877 evacuate(&((StgThunk *)p)->payload[1]);
878 evacuate(&((StgThunk *)p)->payload[0]);
879 break;
880
881 case CONSTR_2_0:
882 evacuate(&((StgClosure *)p)->payload[1]);
883 evacuate(&((StgClosure *)p)->payload[0]);
884 break;
885
886 case FUN_1_0:
887 case FUN_1_1:
888 scavenge_fun_srt(info);
889 evacuate(&((StgClosure *)p)->payload[0]);
890 break;
891
892 case THUNK_1_0:
893 case THUNK_1_1:
894 scavenge_thunk_srt(info);
895 evacuate(&((StgThunk *)p)->payload[0]);
896 break;
897
898 case CONSTR_1_0:
899 case CONSTR_1_1:
900 evacuate(&((StgClosure *)p)->payload[0]);
901 break;
902
903 case FUN_0_1:
904 case FUN_0_2:
905 scavenge_fun_srt(info);
906 break;
907
908 case THUNK_0_1:
909 case THUNK_0_2:
910 scavenge_thunk_srt(info);
911 break;
912
913 case CONSTR_0_1:
914 case CONSTR_0_2:
915 break;
916
917 case FUN:
918 scavenge_fun_srt(info);
919 goto gen_obj;
920
921 case THUNK:
922 {
923 StgPtr end;
924
925 scavenge_thunk_srt(info);
926 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
927 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
928 evacuate((StgClosure **)p);
929 }
930 break;
931 }
932
933 gen_obj:
934 case CONSTR:
935 case CONSTR_NOCAF:
936 case WEAK:
937 case PRIM:
938 {
939 StgPtr end;
940
941 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
942 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
943 evacuate((StgClosure **)p);
944 }
945 break;
946 }
947
948 case BCO: {
949 StgBCO *bco = (StgBCO *)p;
950 evacuate((StgClosure **)&bco->instrs);
951 evacuate((StgClosure **)&bco->literals);
952 evacuate((StgClosure **)&bco->ptrs);
953 break;
954 }
955
956 case IND:
957 case BLACKHOLE:
958 evacuate(&((StgInd *)p)->indirectee);
959 break;
960
961 case MUT_VAR_CLEAN:
962 case MUT_VAR_DIRTY: {
963 gct->eager_promotion = false;
964 evacuate(&((StgMutVar *)p)->var);
965 gct->eager_promotion = saved_eager_promotion;
966
967 if (gct->failed_to_evac) {
968 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
969 } else {
970 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
971 }
972 break;
973 }
974
975 case BLOCKING_QUEUE:
976 {
977 StgBlockingQueue *bq = (StgBlockingQueue *)p;
978
979 gct->eager_promotion = false;
980 evacuate(&bq->bh);
981 evacuate((StgClosure**)&bq->owner);
982 evacuate((StgClosure**)&bq->queue);
983 evacuate((StgClosure**)&bq->link);
984 gct->eager_promotion = saved_eager_promotion;
985
986 if (gct->failed_to_evac) {
987 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
988 } else {
989 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
990 }
991 break;
992 }
993
994 case ARR_WORDS:
995 break;
996
997 case THUNK_SELECTOR:
998 {
999 StgSelector *s = (StgSelector *)p;
1000 evacuate(&s->selectee);
1001 break;
1002 }
1003
1004 // A chunk of stack saved in a heap object
1005 case AP_STACK:
1006 {
1007 StgAP_STACK *ap = (StgAP_STACK *)p;
1008
1009 evacuate(&ap->fun);
1010 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1011 break;
1012 }
1013
1014 case PAP:
1015 scavenge_PAP((StgPAP *)p);
1016 break;
1017
1018 case AP:
1019 scavenge_AP((StgAP *)p);
1020 break;
1021
1022 case MUT_ARR_PTRS_CLEAN:
1023 case MUT_ARR_PTRS_DIRTY:
1024 // follow everything
1025 {
1026 // We don't eagerly promote objects pointed to by a mutable
1027 // array, but if we find the array only points to objects in
1028 // the same or an older generation, we mark it "clean" and
1029 // avoid traversing it during minor GCs.
1030 gct->eager_promotion = false;
1031
1032 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1033
1034 if (gct->failed_to_evac) {
1035 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1036 } else {
1037 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1038 }
1039
1040 gct->eager_promotion = saved_eager_promotion;
1041 gct->failed_to_evac = true; // mutable anyhow.
1042 break;
1043 }
1044
1045 case MUT_ARR_PTRS_FROZEN:
1046 case MUT_ARR_PTRS_FROZEN0:
1047 // follow everything
1048 {
1049 StgPtr q = p;
1050
1051 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1052
1053 // If we're going to put this object on the mutable list, then
1054 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1055 if (gct->failed_to_evac) {
1056 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1057 } else {
1058 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1059 }
1060 break;
1061 }
1062
1063 case SMALL_MUT_ARR_PTRS_CLEAN:
1064 case SMALL_MUT_ARR_PTRS_DIRTY:
1065 // follow everything
1066 {
1067 StgPtr next;
1068 bool saved_eager;
1069
1070 // We don't eagerly promote objects pointed to by a mutable
1071 // array, but if we find the array only points to objects in
1072 // the same or an older generation, we mark it "clean" and
1073 // avoid traversing it during minor GCs.
1074 saved_eager = gct->eager_promotion;
1075 gct->eager_promotion = false;
1076 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1077 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1078 evacuate((StgClosure **)p);
1079 }
1080 gct->eager_promotion = saved_eager;
1081
1082 if (gct->failed_to_evac) {
1083 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1084 } else {
1085 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1086 }
1087
1088 gct->failed_to_evac = true; // mutable anyhow.
1089 break;
1090 }
1091
1092 case SMALL_MUT_ARR_PTRS_FROZEN:
1093 case SMALL_MUT_ARR_PTRS_FROZEN0:
1094 // follow everything
1095 {
1096 StgPtr next, q = p;
1097
1098 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1099 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1100 evacuate((StgClosure **)p);
1101 }
1102
1103 // If we're going to put this object on the mutable list, then
1104 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1105 if (gct->failed_to_evac) {
1106 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1107 } else {
1108 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1109 }
1110 break;
1111 }
1112
1113 case TSO:
1114 {
1115 scavengeTSO((StgTSO*)p);
1116 break;
1117 }
1118
1119 case STACK:
1120 {
1121 StgStack *stack = (StgStack*)p;
1122
1123 gct->eager_promotion = false;
1124
1125 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1126 stack->dirty = gct->failed_to_evac;
1127
1128 gct->eager_promotion = saved_eager_promotion;
1129 break;
1130 }
1131
1132 case MUT_PRIM:
1133 {
1134 StgPtr end;
1135
1136 gct->eager_promotion = false;
1137
1138 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1139 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1140 evacuate((StgClosure **)p);
1141 }
1142
1143 gct->eager_promotion = saved_eager_promotion;
1144 gct->failed_to_evac = true; // mutable
1145 break;
1146 }
1147
1148 case TREC_CHUNK:
1149 {
1150 StgWord i;
1151 StgTRecChunk *tc = ((StgTRecChunk *) p);
1152 TRecEntry *e = &(tc -> entries[0]);
1153 gct->eager_promotion = false;
1154 evacuate((StgClosure **)&tc->prev_chunk);
1155 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1156 evacuate((StgClosure **)&e->tvar);
1157 evacuate((StgClosure **)&e->expected_value);
1158 evacuate((StgClosure **)&e->new_value);
1159 }
1160 gct->eager_promotion = saved_eager_promotion;
1161 gct->failed_to_evac = true; // mutable
1162 break;
1163 }
1164
1165 default:
1166 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1167 info->type, p);
1168 }
1169
1170 if (gct->failed_to_evac) {
1171 gct->failed_to_evac = false;
1172 if (gct->evac_gen_no) {
1173 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1174 }
1175 }
1176 } // while (p = pop_mark_stack())
1177 }
1178
1179 /* -----------------------------------------------------------------------------
1180 Scavenge one object.
1181
1182 This is used for objects that are temporarily marked as mutable
1183 because they contain old-to-new generation pointers. Only certain
1184 objects can have this property.
1185 -------------------------------------------------------------------------- */
1186
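// Returns true ("no luck") if something the object points to could not
// be promoted to the desired generation, i.e. the object must stay on
// the mutable list so that it is revisited at the next GC.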
1187 static bool
1188 scavenge_one(StgPtr p)
1189 {
1190 const StgInfoTable *info;
1191 bool no_luck;
1192 bool saved_eager_promotion;
1193
1194 saved_eager_promotion = gct->eager_promotion;
1195
1196 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1197 info = get_itbl((StgClosure *)p);
1198
1199 switch (info->type) {
1200
1201 case MVAR_CLEAN:
1202 case MVAR_DIRTY:
1203 {
1204 StgMVar *mvar = ((StgMVar *)p);
1205 gct->eager_promotion = false;
1206 evacuate((StgClosure **)&mvar->head);
1207 evacuate((StgClosure **)&mvar->tail);
1208 evacuate((StgClosure **)&mvar->value);
1209 gct->eager_promotion = saved_eager_promotion;
1210
1211 if (gct->failed_to_evac) {
1212 mvar->header.info = &stg_MVAR_DIRTY_info;
1213 } else {
1214 mvar->header.info = &stg_MVAR_CLEAN_info;
1215 }
1216 break;
1217 }
1218
1219 case TVAR:
1220 {
1221 StgTVar *tvar = ((StgTVar *)p);
1222 gct->eager_promotion = false;
1223 evacuate((StgClosure **)&tvar->current_value);
1224 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1225 gct->eager_promotion = saved_eager_promotion;
1226
1227 if (gct->failed_to_evac) {
1228 tvar->header.info = &stg_TVAR_DIRTY_info;
1229 } else {
1230 tvar->header.info = &stg_TVAR_CLEAN_info;
1231 }
1232 break;
1233 }
1234
1235 case THUNK:
1236 case THUNK_1_0:
1237 case THUNK_0_1:
1238 case THUNK_1_1:
1239 case THUNK_0_2:
1240 case THUNK_2_0:
1241 {
1242 StgPtr q, end;
1243
1244 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1245 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1246 evacuate((StgClosure **)q);
1247 }
1248 break;
1249 }
1250
1251 case FUN:
1252 case FUN_1_0: // hardly worth specialising these guys
1253 case FUN_0_1:
1254 case FUN_1_1:
1255 case FUN_0_2:
1256 case FUN_2_0:
1257 case CONSTR:
1258 case CONSTR_NOCAF:
1259 case CONSTR_1_0:
1260 case CONSTR_0_1:
1261 case CONSTR_1_1:
1262 case CONSTR_0_2:
1263 case CONSTR_2_0:
1264 case PRIM:
1265 {
1266 StgPtr q, end;
1267
1268 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1269 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1270 evacuate((StgClosure **)q);
1271 }
1272 break;
1273 }
1274
1275 case WEAK:
1276 // This WEAK object will not be considered by tidyWeakList during this
1277 // collection because it is in a generation > N, but it is on the
1278 // mutable list so we must evacuate all of its pointers because some
1279 // of them may point into a younger generation.
1280 scavengeLiveWeak((StgWeak *)p);
1281 break;
1282
1283 case MUT_VAR_CLEAN:
1284 case MUT_VAR_DIRTY: {
1285 StgPtr q = p;
1286
1287 gct->eager_promotion = false;
1288 evacuate(&((StgMutVar *)p)->var);
1289 gct->eager_promotion = saved_eager_promotion;
1290
1291 if (gct->failed_to_evac) {
1292 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1293 } else {
1294 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1295 }
1296 break;
1297 }
1298
1299 case BLOCKING_QUEUE:
1300 {
1301 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1302
1303 gct->eager_promotion = false;
1304 evacuate(&bq->bh);
1305 evacuate((StgClosure**)&bq->owner);
1306 evacuate((StgClosure**)&bq->queue);
1307 evacuate((StgClosure**)&bq->link);
1308 gct->eager_promotion = saved_eager_promotion;
1309
1310 if (gct->failed_to_evac) {
1311 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1312 } else {
1313 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1314 }
1315 break;
1316 }
1317
1318 case THUNK_SELECTOR:
1319 {
1320 StgSelector *s = (StgSelector *)p;
1321 evacuate(&s->selectee);
1322 break;
1323 }
1324
1325 case AP_STACK:
1326 {
1327 StgAP_STACK *ap = (StgAP_STACK *)p;
1328
1329 evacuate(&ap->fun);
1330 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1331 p = (StgPtr)ap->payload + ap->size;
1332 break;
1333 }
1334
1335 case PAP:
1336 p = scavenge_PAP((StgPAP *)p);
1337 break;
1338
1339 case AP:
1340 p = scavenge_AP((StgAP *)p);
1341 break;
1342
1343 case ARR_WORDS:
1344 // nothing to follow
1345 break;
1346
1347 case MUT_ARR_PTRS_CLEAN:
1348 case MUT_ARR_PTRS_DIRTY:
1349 {
1350 // We don't eagerly promote objects pointed to by a mutable
1351 // array, but if we find the array only points to objects in
1352 // the same or an older generation, we mark it "clean" and
1353 // avoid traversing it during minor GCs.
1354 gct->eager_promotion = false;
1355
1356 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1357
1358 if (gct->failed_to_evac) {
1359 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1360 } else {
1361 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1362 }
1363
1364 gct->eager_promotion = saved_eager_promotion;
1365 gct->failed_to_evac = true;
1366 break;
1367 }
1368
1369 case MUT_ARR_PTRS_FROZEN:
1370 case MUT_ARR_PTRS_FROZEN0:
1371 {
1372 // follow everything
1373 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1374
1375 // If we're going to put this object on the mutable list, then
1376 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1377 if (gct->failed_to_evac) {
1378 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1379 } else {
1380 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1381 }
1382 break;
1383 }
1384
1385 case SMALL_MUT_ARR_PTRS_CLEAN:
1386 case SMALL_MUT_ARR_PTRS_DIRTY:
1387 {
1388 StgPtr next, q;
1389 bool saved_eager;
1390
1391 // We don't eagerly promote objects pointed to by a mutable
1392 // array, but if we find the array only points to objects in
1393 // the same or an older generation, we mark it "clean" and
1394 // avoid traversing it during minor GCs.
1395 saved_eager = gct->eager_promotion;
1396 gct->eager_promotion = false;
1397 q = p;
1398 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1399 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1400 evacuate((StgClosure **)p);
1401 }
1402 gct->eager_promotion = saved_eager;
1403
1404 if (gct->failed_to_evac) {
1405 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1406 } else {
1407 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1408 }
1409
1410 gct->failed_to_evac = true;
1411 break;
1412 }
1413
1414 case SMALL_MUT_ARR_PTRS_FROZEN:
1415 case SMALL_MUT_ARR_PTRS_FROZEN0:
1416 {
1417 // follow everything
1418 StgPtr next, q=p;
1419
1420 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1421 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1422 evacuate((StgClosure **)p);
1423 }
1424
1425 // If we're going to put this object on the mutable list, then
1426 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1427 if (gct->failed_to_evac) {
1428 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1429 } else {
1430 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1431 }
1432 break;
1433 }
1434
1435 case TSO:
1436 {
1437 scavengeTSO((StgTSO*)p);
1438 break;
1439 }
1440
1441 case STACK:
1442 {
1443 StgStack *stack = (StgStack*)p;
1444
1445 gct->eager_promotion = false;
1446
1447 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1448 stack->dirty = gct->failed_to_evac;
1449
1450 gct->eager_promotion = saved_eager_promotion;
1451 break;
1452 }
1453
1454 case MUT_PRIM:
1455 {
1456 StgPtr end;
1457
1458 gct->eager_promotion = false;
1459
1460 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1461 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1462 evacuate((StgClosure **)p);
1463 }
1464
1465 gct->eager_promotion = saved_eager_promotion;
1466 gct->failed_to_evac = true; // mutable
1467 break;
1468
1469 }
1470
1471 case TREC_CHUNK:
1472 {
1473 StgWord i;
1474 StgTRecChunk *tc = ((StgTRecChunk *) p);
1475 TRecEntry *e = &(tc -> entries[0]);
1476 gct->eager_promotion = false;
1477 evacuate((StgClosure **)&tc->prev_chunk);
1478 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1479 evacuate((StgClosure **)&e->tvar);
1480 evacuate((StgClosure **)&e->expected_value);
1481 evacuate((StgClosure **)&e->new_value);
1482 }
1483 gct->eager_promotion = saved_eager_promotion;
1484 gct->failed_to_evac = true; // mutable
1485 break;
1486 }
1487
1488 case IND:
1489 // IND can happen, for example, when the interpreter allocates
1490 // a gigantic AP closure (more than one block), which ends up
1491 // on the large-object list and then gets updated. See #3424.
1492 case BLACKHOLE:
1493 case IND_STATIC:
1494 evacuate(&((StgInd *)p)->indirectee);
1495
1496 #if 0 && defined(DEBUG)
1497 if (RtsFlags.DebugFlags.gc)
1498 /* Debugging code to print out the size of the thing we just
1499 * promoted
1500 */
1501 {
1502 StgPtr start = gen->scan;
1503 bdescr *start_bd = gen->scan_bd;
1504 StgWord size = 0;
1505 scavenge(&gen);
1506 if (start_bd != gen->scan_bd) {
1507 size += (P_)BLOCK_ROUND_UP(start) - start;
1508 start_bd = start_bd->link;
1509 while (start_bd != gen->scan_bd) {
1510 size += BLOCK_SIZE_W;
1511 start_bd = start_bd->link;
1512 }
1513 size += gen->scan -
1514 (P_)BLOCK_ROUND_DOWN(gen->scan);
1515 } else {
1516 size = gen->scan - start;
1517 }
1518 debugBelch("evac IND: %ld bytes", size * sizeof(W_));
1519 }
1520 #endif
1521 break;
1522
1523 case COMPACT_NFDATA:
1524 scavenge_compact((StgCompactNFData*)p);
1525 break;
1526
1527 default:
1528 barf("scavenge_one: strange object %d", (int)(info->type));
1529 }
1530
1531 no_luck = gct->failed_to_evac;
1532 gct->failed_to_evac = false;
1533 return (no_luck);
1534 }
1535
1536 /* -----------------------------------------------------------------------------
1537 Scavenging mutable lists.
1538
1539 We treat the mutable list of each generation > N (i.e. all the
1540 generations older than the one being collected) as roots. We also
1541 remove non-mutable objects from the mutable list at this point.
1542 -------------------------------------------------------------------------- */
1543
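// bd is a chain of blocks holding one generation's mutable list (each
// capability keeps its own); every word between bd->start and bd->free
// is a pointer to an object in gen that may point into a younger
// generation.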
1544 static void
1545 scavenge_mutable_list(bdescr *bd, generation *gen)
1546 {
1547 StgPtr p, q;
1548 uint32_t gen_no;
1549
1550 gen_no = gen->no;
1551 gct->evac_gen_no = gen_no;
1552 for (; bd != NULL; bd = bd->link) {
1553 for (q = bd->start; q < bd->free; q++) {
1554 p = (StgPtr)*q;
1555 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1556
1557 #if defined(DEBUG)
1558 switch (get_itbl((StgClosure *)p)->type) {
1559 case MUT_VAR_CLEAN:
1560 // can happen due to concurrent writeMutVars
1561 case MUT_VAR_DIRTY:
1562 mutlist_MUTVARS++; break;
1563 case MUT_ARR_PTRS_CLEAN:
1564 case MUT_ARR_PTRS_DIRTY:
1565 case MUT_ARR_PTRS_FROZEN:
1566 case MUT_ARR_PTRS_FROZEN0:
1567 mutlist_MUTARRS++; break;
1568 case MVAR_CLEAN:
1569 barf("MVAR_CLEAN on mutable list");
1570 case MVAR_DIRTY:
1571 mutlist_MVARS++; break;
1572 case TVAR:
1573 mutlist_TVAR++; break;
1574 case TREC_CHUNK:
1575 mutlist_TREC_CHUNK++; break;
1576 case MUT_PRIM:
1577 if (((StgClosure*)p)->header.info == &stg_TVAR_WATCH_QUEUE_info)
1578 mutlist_TVAR_WATCH_QUEUE++;
1579 else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
1580 mutlist_TREC_HEADER++;
1581 else if (((StgClosure*)p)->header.info == &stg_ATOMIC_INVARIANT_info)
1582 mutlist_ATOMIC_INVARIANT++;
1583 else if (((StgClosure*)p)->header.info == &stg_INVARIANT_CHECK_QUEUE_info)
1584 mutlist_INVARIANT_CHECK_QUEUE++;
1585 else
1586 mutlist_OTHERS++;
1587 break;
1588 default:
1589 mutlist_OTHERS++; break;
1590 }
1591 #endif
1592
1593 // Check whether this object is "clean", that is it
1594 // definitely doesn't point into a young generation.
1595 // Clean objects don't need to be scavenged. Some clean
1596 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1597 // list at all; others, such as MUT_ARR_PTRS, are
1598 // always on the mutable list.
1599 //
1600 switch (get_itbl((StgClosure *)p)->type) {
1601 case MUT_ARR_PTRS_CLEAN:
1602 recordMutableGen_GC((StgClosure *)p,gen_no);
1603 continue;
1604 case MUT_ARR_PTRS_DIRTY:
1605 {
1606 bool saved_eager_promotion;
1607 saved_eager_promotion = gct->eager_promotion;
1608 gct->eager_promotion = false;
1609
1610 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1611
1612 if (gct->failed_to_evac) {
1613 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1614 } else {
1615 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1616 }
1617
1618 gct->eager_promotion = saved_eager_promotion;
1619 gct->failed_to_evac = false;
1620 recordMutableGen_GC((StgClosure *)p,gen_no);
1621 continue;
1622 }
1623 default:
1624 ;
1625 }
1626
1627 if (scavenge_one(p)) {
1628 // didn't manage to promote everything, so put the
1629 // object back on the list.
1630 recordMutableGen_GC((StgClosure *)p,gen_no);
1631 }
1632 }
1633 }
1634 }
1635
1636 void
1637 scavenge_capability_mut_lists (Capability *cap)
1638 {
1639 uint32_t g;
1640
1641 /* Mutable lists from each generation > N
1642 * we want to *scavenge* these roots, not evacuate them: they're not
1643 * going to move in this GC.
1644 * Also do them in reverse generation order, for the usual reason:
1645 * namely to reduce the likelihood of spurious old->new pointers.
1646 */
1647 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1648 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1649 freeChain_sync(cap->saved_mut_lists[g]);
1650 cap->saved_mut_lists[g] = NULL;
1651 }
1652 }
1653
1654 /* -----------------------------------------------------------------------------
1655 Scavenging the static objects.
1656
1657 We scavenge the static objects that have been evacuated during this
1658 GC: each object on gct->static_objects is scavenged and then moved
1659 onto the scavenged_static_objects list, until the list is empty.
1660 -------------------------------------------------------------------------- */
1661
1662 static void
1663 scavenge_static(void)
1664 {
1665 StgClosure *flagged_p, *p;
1666 const StgInfoTable *info;
1667
1668 debugTrace(DEBUG_gc, "scavenging static objects");
1669
1670 /* Always evacuate straight to the oldest generation for static
1671 * objects */
1672 gct->evac_gen_no = oldest_gen->no;
1673
1674 /* keep going until we've scavenged all the objects on the linked
1675 list... */
1676
1677 while (1) {
1678
1679 /* get the next static object from the list. Remember, there might
1680 * be more stuff on this list after each evacuation...
1681 * (static_objects is a global)
1682 */
1683 flagged_p = gct->static_objects;
1684 if (flagged_p == END_OF_STATIC_OBJECT_LIST) {
1685 break;
1686 }
1687 p = UNTAG_STATIC_LIST_PTR(flagged_p);
1688
1689 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1690 info = get_itbl(p);
1691 // make sure the info pointer is into text space
1692
1693 /* Take this object *off* the static_objects list,
1694 * and put it on the scavenged_static_objects list.
1695 */
1696 gct->static_objects = *STATIC_LINK(info,p);
1697 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1698 gct->scavenged_static_objects = flagged_p;
1699
1700 switch (info -> type) {
1701
1702 case IND_STATIC:
1703 {
1704 StgInd *ind = (StgInd *)p;
1705 evacuate(&ind->indirectee);
1706
1707 /* might fail to evacuate it, in which case we have to pop it
1708 * back on the mutable list of the oldest generation. We
1709 * leave it *on* the scavenged_static_objects list, though,
1710 * in case we visit this object again.
1711 */
1712 if (gct->failed_to_evac) {
1713 gct->failed_to_evac = false;
1714 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1715 }
1716 break;
1717 }
1718
1719 case THUNK_STATIC:
1720 scavenge_thunk_srt(info);
1721 break;
1722
1723 case FUN_STATIC:
1724 scavenge_fun_srt(info);
1725 /* fallthrough */
1726
1727 // a FUN_STATIC can also be an SRT, so it may have pointer
1728 // fields. See Note [SRTs] in CmmBuildInfoTables, specifically
1729 // the [FUN] optimisation.
1730
1731 case CONSTR:
1732 case CONSTR_NOCAF:
1733 case CONSTR_1_0:
1734 case CONSTR_0_1:
1735 case CONSTR_2_0:
1736 case CONSTR_1_1:
1737 case CONSTR_0_2:
1738 {
1739 StgPtr q, next;
1740
1741 next = (P_)p->payload + info->layout.payload.ptrs;
1742 // evacuate the pointers
1743 for (q = (P_)p->payload; q < next; q++) {
1744 evacuate((StgClosure **)q);
1745 }
1746 break;
1747 }
1748
1749 default:
1750 barf("scavenge_static: strange closure %d", (int)(info->type));
1751 }
1752
1753 ASSERT(gct->failed_to_evac == false);
1754 }
1755 }
1756
1757 /* -----------------------------------------------------------------------------
1758 scavenge a chunk of memory described by a bitmap
1759 -------------------------------------------------------------------------- */
1760
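// Like scavenge_small_bitmap, but the bitmap spans several StgWords: a
// clear bit marks a pointer word to evacuate, a set bit a non-pointer
// word to skip.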
1761 static void
1762 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
1763 {
1764 uint32_t i, j, b;
1765 StgWord bitmap;
1766
1767 b = 0;
1768
1769 for (i = 0; i < size; b++) {
1770 bitmap = large_bitmap->bitmap[b];
1771 j = stg_min(size-i, BITS_IN(W_));
1772 i += j;
1773 for (; j > 0; j--, p++) {
1774 if ((bitmap & 1) == 0) {
1775 evacuate((StgClosure **)p);
1776 }
1777 bitmap = bitmap >> 1;
1778 }
1779 }
1780 }
1781
1782
1783 /* -----------------------------------------------------------------------------
1784 scavenge_stack walks over a section of stack and evacuates all the
1785 objects pointed to by it. We can use the same code for walking
1786 AP_STACK_UPDs, since these are just sections of copied stack.
1787 -------------------------------------------------------------------------- */
1788
1789 static void
1790 scavenge_stack(StgPtr p, StgPtr stack_end)
1791 {
1792 const StgRetInfoTable* info;
1793 StgWord bitmap;
1794 StgWord size;
1795
1796 /*
1797 * Each time around this loop, we are looking at a chunk of stack
1798 * that starts with an activation record.
1799 */
1800
1801 while (p < stack_end) {
1802 info = get_ret_itbl((StgClosure *)p);
1803
1804 switch (info->i.type) {
1805
1806 case UPDATE_FRAME:
1807 // Note [upd-black-hole]
1808 //
1809 // In SMP, we can get update frames that point to indirections
1810 // when two threads evaluate the same thunk. We do attempt to
1811 // discover this situation in threadPaused(), but it's
1812 // possible that the following sequence occurs:
1813 //
1814 // A B
1815 // enter T
1816 // enter T
1817 // blackhole T
1818 // update T
1819 // GC
1820 //
1821 // Now T is an indirection, and the update frame is already
1822 // marked on A's stack, so we won't traverse it again in
1823 // threadPaused(). We could traverse the whole stack again
1824 // before GC, but that would be too expensive.
1825 //
1826 // Scavenging this update frame as normal would be disastrous;
1827 // the indirection will be shorted out, and the updatee would
1828 // end up pointing to the value. The update code will then
1829 // overwrite the value, instead of the BLACKHOLE it is
1830 // expecting to write to.
1831 //
1832 // One way we could try to fix this is to detect when the
1833 // BLACKHOLE has been updated by another thread, and then
1834 // replace this update frame with a special frame that just
1835 // enters the value. But this introduces some other
1836 // complexities:
1837 //
1838 // - we must be careful to call checkBlockingQueues() in this
1839 // special frame, because we might otherwise miss wakeups
1840 // for threads that blocked on the original BLACKHOLE,
1841 // - we must spot this frame when we're stripping the stack in
1842 // raiseAsync() and raiseExceptionHelper(), and arrange to call
1843 // checkBlockingQueues() there too.
1844 //
1845 // This is hard to get right, indeed we previously got it
1846 // wrong (see #13751). So we now take a different approach:
1847 // always copy the BLACKHOLE, even if it is actually an
1848 // indirection. This way we keep the update frame, we're
1849 // guaranteed to still perform the update, and check for
1850 // missed wakeups even when stripping the stack in
1851 // raiseAsync() and raiseExceptionHelper(). This is also a
1852 // little more efficient, because evacuating a known BLACKHOLE
1853 // is faster than evacuating an unknown closure.
1854 //
1855 // NOTE: for the reasons above, blackholing (either lazy or
1856 // eager) is NOT optional. See also Note [avoiding
1857 // threadPaused] in Interpreter.c.
1858 //
1859 // There are a couple of alternative solutions:
1860 // - if we see an update frame that points to an indirection,
1861 // arrange to call checkBlockingQueues() on that thread
1862 // after GC.
1863 // - spot a BLOCKING_QUEUE that points to a value and
1864 // arrange to wake it up after the GC.
1865 //
1866 // These are more difficult to implement, requiring an extra
1867 // list to be maintained during GC. They also rely on more
1868 // subtle invariants than the solution implemented here.
1869 //
1870
1871 {
1872 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1873
1874 evacuate_BLACKHOLE(&frame->updatee);
1875 p += sizeofW(StgUpdateFrame);
1876 continue;
1877 }
1878
1879 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1880 case CATCH_STM_FRAME:
1881 case CATCH_RETRY_FRAME:
1882 case ATOMICALLY_FRAME:
1883 case UNDERFLOW_FRAME:
1884 case STOP_FRAME:
1885 case CATCH_FRAME:
1886 case RET_SMALL:
1887 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1888 size = BITMAP_SIZE(info->i.layout.bitmap);
1889 // NOTE: the payload starts immediately after the info-ptr, we
1890 // don't have an StgHeader in the same sense as a heap closure.
1891 p++;
1892 p = scavenge_small_bitmap(p, size, bitmap);
1893
1894 follow_srt:
1895 if (major_gc && info->i.srt) {
1896 StgClosure *srt = (StgClosure*)GET_SRT(info);
1897 evacuate(&srt);
1898 }
1899 continue;
1900
1901 case RET_BCO: {
1902 StgBCO *bco;
1903 StgWord size;
1904
1905 p++;
1906 evacuate((StgClosure **)p);
1907 bco = (StgBCO *)*p;
1908 p++;
1909 size = BCO_BITMAP_SIZE(bco);
1910 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1911 p += size;
1912 continue;
1913 }
1914
1915 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1916 case RET_BIG:
1917 {
1918 StgWord size;
1919
1920 size = GET_LARGE_BITMAP(&info->i)->size;
1921 p++;
1922 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1923 p += size;
1924 // and don't forget to follow the SRT
1925 goto follow_srt;
1926 }
1927
1928 case RET_FUN:
1929 {
1930 StgRetFun *ret_fun = (StgRetFun *)p;
1931 const StgFunInfoTable *fun_info;
1932
1933 evacuate(&ret_fun->fun);
1934 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1935 p = scavenge_arg_block(fun_info, ret_fun->payload);
1936 goto follow_srt;
1937 }
1938
1939 default:
1940 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1941 }
1942 }
1943 }
1944
1945 /*-----------------------------------------------------------------------------
1946 scavenge the large object list.
1947
1948 evac_gen set by caller; similar games played with evac_gen as with
1949 scavenge() - see comment at the top of scavenge(). Most large
1950 objects are (repeatedly) mutable, so most of the time evac_gen will
1951 be zero.
1952 --------------------------------------------------------------------------- */
1953
1954 static void
1955 scavenge_large (gen_workspace *ws)
1956 {
1957 bdescr *bd;
1958 StgPtr p;
1959
1960 gct->evac_gen_no = ws->gen->no;
1961
1962 bd = ws->todo_large_objects;
1963
1964 for (; bd != NULL; bd = ws->todo_large_objects) {
1965
1966 // take this object *off* the large objects list and put it on
1967 // the scavenged large objects list. This is so that we can
1968 // treat todo_large_objects as a stack and push new objects on
1969 // the front when evacuating.
1970 ws->todo_large_objects = bd->link;
1971
1972 ACQUIRE_SPIN_LOCK(&ws->gen->sync);
1973 if (bd->flags & BF_COMPACT) {
1974 dbl_link_onto(bd, &ws->gen->live_compact_objects);
1975 StgCompactNFData *str = ((StgCompactNFDataBlock*)bd->start)->owner;
1976 ws->gen->n_live_compact_blocks += str->totalW / BLOCK_SIZE_W;
1977 p = (StgPtr)str;
1978 } else {
1979 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
1980 ws->gen->n_scavenged_large_blocks += bd->blocks;
1981 p = bd->start;
1982 }
1983 RELEASE_SPIN_LOCK(&ws->gen->sync);
1984
1985 if (scavenge_one(p)) {
1986 if (ws->gen->no > 0) {
1987 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
1988 }
1989 }
1990
1991 // stats
1992 gct->scanned += closure_sizeW((StgClosure*)p);
1993 }
1994 }
1995
1996 /* ----------------------------------------------------------------------------
1997 Look for work to do.
1998
1999 We look for the oldest gen that has either a todo block that can
2000 be scanned, or a block of work on the global queue that we can
2001 scan.
2002
2003 It is important to take work from the *oldest* generation that
2004 has work available, because that minimizes the likelihood of
2005 evacuating objects into a young generation when they should have
2006 been eagerly promoted. This really does make a difference (the
2007 cacheprof benchmark is one that is affected).
2008
2009 We also want to scan the todo block if possible before grabbing
2010 work from the global queue, the reason being that we don't want to
2011 steal work from the global queue and starve other threads if there
2012 is other work we can usefully be doing.
2013 ------------------------------------------------------------------------- */
2014
2015 static bool
2016 scavenge_find_work (void)
2017 {
2018 int g;
2019 gen_workspace *ws;
2020 bool did_something, did_anything;
2021 bdescr *bd;
2022
2023 gct->scav_find_work++;
2024
2025 did_anything = false;
2026
2027 loop:
2028 did_something = false;
2029 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2030 ws = &gct->gens[g];
2031
2032 gct->scan_bd = NULL;
2033
2034 // If we have a scan block with some work to do,
2035 // scavenge everything up to the free pointer.
2036 if (ws->todo_bd->u.scan < ws->todo_free)
2037 {
2038 scavenge_block(ws->todo_bd);
2039 did_something = true;
2040 break;
2041 }
2042
2043 // If we have any large objects to scavenge, do them now.
2044 if (ws->todo_large_objects) {
2045 scavenge_large(ws);
2046 did_something = true;
2047 break;
2048 }
2049
2050 if ((bd = grab_local_todo_block(ws)) != NULL) {
2051 scavenge_block(bd);
2052 did_something = true;
2053 break;
2054 }
2055 }
2056
2057 if (did_something) {
2058 did_anything = true;
2059 goto loop;
2060 }
2061
2062 #if defined(THREADED_RTS)
2063 if (work_stealing) {
2064 // look for work to steal
2065 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2066 if ((bd = steal_todo_block(g)) != NULL) {
2067 scavenge_block(bd);
2068 did_something = true;
2069 break;
2070 }
2071 }
2072
2073 if (did_something) {
2074 did_anything = true;
2075 goto loop;
2076 }
2077 }
2078 #endif
2079
2080 // only return when there is no more work to do
2081
2082 return did_anything;
2083 }
2084
2085 /* ----------------------------------------------------------------------------
2086 Scavenge until we can't find anything more to scavenge.
2087 ------------------------------------------------------------------------- */
2088
2089 void
2090 scavenge_loop(void)
2091 {
2092 bool work_to_do;
2093
2094 loop:
2095 work_to_do = false;
2096
2097 // scavenge static objects
2098 if (major_gc && gct->static_objects != END_OF_STATIC_OBJECT_LIST) {
2099 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
2100 scavenge_static();
2101 }
2102
2103 // scavenge objects in compacted generation
2104 if (mark_stack_bd != NULL && !mark_stack_empty()) {
2105 scavenge_mark_stack();
2106 work_to_do = true;
2107 }
2108
2109 // Order is important here: we want to deal in full blocks as
2110 // much as possible, so go for global work in preference to
2111 // local work. Only if all the global work has been exhausted
2112 // do we start scavenging the fragments of blocks in the local
2113 // workspaces.
2114 if (scavenge_find_work()) goto loop;
2115
2116 if (work_to_do) goto loop;
2117 }