rts: More const correct-ness fixes
[ghc.git] / rts / sm / Scav.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "Storage.h"
18 #include "GC.h"
19 #include "GCThread.h"
20 #include "GCUtils.h"
21 #include "Compact.h"
22 #include "MarkStack.h"
23 #include "Evac.h"
24 #include "Scav.h"
25 #include "Apply.h"
26 #include "Trace.h"
27 #include "Sanity.h"
28 #include "Capability.h"
29 #include "LdvProfile.h"
30
31 #include "sm/MarkWeak.h"
32
33 static void scavenge_stack (StgPtr p, StgPtr stack_end);
34
35 static void scavenge_large_bitmap (StgPtr p,
36 StgLargeBitmap *large_bitmap,
37 StgWord size );
38
39 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
40 # define evacuate(a) evacuate1(a)
41 # define scavenge_loop(a) scavenge_loop1(a)
42 # define scavenge_block(a) scavenge_block1(a)
43 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
44 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
45 #endif
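// When the RTS is threaded but this file is compiled without PARALLEL_GC,
// the macros above map these entry points onto their single-threaded
// "...1" variants, so the code below is written against one set of names.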
46
47 /* -----------------------------------------------------------------------------
48 Scavenge a TSO.
49 -------------------------------------------------------------------------- */
50
51 static void
52 scavengeTSO (StgTSO *tso)
53 {
54 rtsBool saved_eager;
55
56 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
57
58 // update the pointer from the InCall.
59 if (tso->bound != NULL) {
60 // NB. We can't just set tso->bound->tso = tso, because this
 61         // might be an invalid copy of the TSO resulting from multiple
62 // threads evacuating the TSO simultaneously (see
63 // Evac.c:copy_tag()). Calling evacuate() on this pointer
64 // will ensure that we update it to point to the correct copy.
65 evacuate((StgClosure **)&tso->bound->tso);
66 }
67
68 saved_eager = gct->eager_promotion;
69 gct->eager_promotion = rtsFalse;
70
71 evacuate((StgClosure **)&tso->blocked_exceptions);
72 evacuate((StgClosure **)&tso->bq);
73
 74     // scavenge the current transaction record
75 evacuate((StgClosure **)&tso->trec);
76
77 evacuate((StgClosure **)&tso->stackobj);
78
79 evacuate((StgClosure **)&tso->_link);
80 if ( tso->why_blocked == BlockedOnMVar
81 || tso->why_blocked == BlockedOnMVarRead
82 || tso->why_blocked == BlockedOnBlackHole
83 || tso->why_blocked == BlockedOnMsgThrowTo
84 || tso->why_blocked == NotBlocked
85 ) {
86 evacuate(&tso->block_info.closure);
87 }
88 #ifdef THREADED_RTS
89 // in the THREADED_RTS, block_info.closure must always point to a
90 // valid closure, because we assume this in throwTo(). In the
91 // non-threaded RTS it might be a FD (for
92 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
93 else {
94 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
95 }
96 #endif
97
98 tso->dirty = gct->failed_to_evac;
99
100 gct->eager_promotion = saved_eager;
101 }
102
103 /* -----------------------------------------------------------------------------
104 Mutable arrays of pointers
105 -------------------------------------------------------------------------- */
106
107 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
108 {
109 W_ m;
110 rtsBool any_failed;
111 StgPtr p, q;
112
113 any_failed = rtsFalse;
114 p = (StgPtr)&a->payload[0];
115 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
116 {
117 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
118 for (; p < q; p++) {
119 evacuate((StgClosure**)p);
120 }
121 if (gct->failed_to_evac) {
122 any_failed = rtsTrue;
123 *mutArrPtrsCard(a,m) = 1;
124 gct->failed_to_evac = rtsFalse;
125 } else {
126 *mutArrPtrsCard(a,m) = 0;
127 }
128 }
129
130 q = (StgPtr)&a->payload[a->ptrs];
131 if (p < q) {
132 for (; p < q; p++) {
133 evacuate((StgClosure**)p);
134 }
135 if (gct->failed_to_evac) {
136 any_failed = rtsTrue;
137 *mutArrPtrsCard(a,m) = 1;
138 gct->failed_to_evac = rtsFalse;
139 } else {
140 *mutArrPtrsCard(a,m) = 0;
141 }
142 }
143
144 gct->failed_to_evac = any_failed;
145 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
146 }
147
148 // scavenge only the marked areas of a MUT_ARR_PTRS
149 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
150 {
151 W_ m;
152 StgPtr p, q;
153 rtsBool any_failed;
154
155 any_failed = rtsFalse;
156 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
157 {
158 if (*mutArrPtrsCard(a,m) != 0) {
159 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
160 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
161 (StgPtr)&a->payload[a->ptrs]);
162 for (; p < q; p++) {
163 evacuate((StgClosure**)p);
164 }
165 if (gct->failed_to_evac) {
166 any_failed = rtsTrue;
167 gct->failed_to_evac = rtsFalse;
168 } else {
169 *mutArrPtrsCard(a,m) = 0;
170 }
171 }
172 }
173
174 gct->failed_to_evac = any_failed;
175 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
176 }
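/* Illustrative sketch (hypothetical helper, not part of the RTS and not
 * compiled): the card-table convention assumed by the two functions above.
 * Each card covers (1 << MUT_ARR_PTRS_CARD_BITS) payload elements, and
 * *mutArrPtrsCard(a,m) is nonzero iff card m may still contain pointers
 * into a younger generation.
 */
#if 0
static rtsBool mut_arr_ptrs_any_dirty (StgMutArrPtrs *a)
{
    W_ m;
    for (m = 0; m < mutArrPtrsCards(a->ptrs); m++) {
        if (*mutArrPtrsCard(a,m) != 0) {
            return rtsTrue;
        }
    }
    return rtsFalse;
}
#endif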
177
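// Convention for small bitmaps (see the loop below): a clear bit marks a
// pointer word, which is evacuated; a set bit marks a non-pointer word,
// which is skipped.  E.g. size=4, bitmap=0b0101: words 0 and 2 are
// non-pointers, words 1 and 3 are evacuated.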
178 STATIC_INLINE StgPtr
179 scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
180 {
181 while (size > 0) {
182 if ((bitmap & 1) == 0) {
183 evacuate((StgClosure **)p);
184 }
185 p++;
186 bitmap = bitmap >> 1;
187 size--;
188 }
189 return p;
190 }
191
192 /* -----------------------------------------------------------------------------
193 Blocks of function args occur on the stack (at the top) and
194 in PAPs.
195 -------------------------------------------------------------------------- */
196
197 STATIC_INLINE StgPtr
198 scavenge_arg_block (const StgFunInfoTable *fun_info, StgClosure **args)
199 {
200 StgPtr p;
201 StgWord bitmap;
202 StgWord size;
203
204 p = (StgPtr)args;
205 switch (fun_info->f.fun_type) {
206 case ARG_GEN:
207 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
208 size = BITMAP_SIZE(fun_info->f.b.bitmap);
209 goto small_bitmap;
210 case ARG_GEN_BIG:
211 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
212 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
213 p += size;
214 break;
215 default:
216 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
217 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
218 small_bitmap:
219 p = scavenge_small_bitmap(p, size, bitmap);
220 break;
221 }
222 return p;
223 }
224
225 STATIC_INLINE GNUC_ATTR_HOT StgPtr
226 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
227 {
228 StgPtr p;
229 StgWord bitmap;
230 const StgFunInfoTable *fun_info;
231
232 fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(fun));
233 ASSERT(fun_info->i.type != PAP);
234 p = (StgPtr)payload;
235
236 switch (fun_info->f.fun_type) {
237 case ARG_GEN:
238 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
239 goto small_bitmap;
240 case ARG_GEN_BIG:
241 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
242 p += size;
243 break;
244 case ARG_BCO:
245 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
246 p += size;
247 break;
248 default:
249 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
250 small_bitmap:
251 p = scavenge_small_bitmap(p, size, bitmap);
252 break;
253 }
254 return p;
255 }
256
257 STATIC_INLINE GNUC_ATTR_HOT StgPtr
258 scavenge_PAP (StgPAP *pap)
259 {
260 evacuate(&pap->fun);
261 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
262 }
263
264 STATIC_INLINE StgPtr
265 scavenge_AP (StgAP *ap)
266 {
267 evacuate(&ap->fun);
268 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
269 }
270
271 /* -----------------------------------------------------------------------------
272 Scavenge SRTs
273 -------------------------------------------------------------------------- */
274
275 /* Similar to scavenge_large_bitmap(), but we don't write back the
276 * pointers we get back from evacuate().
277 */
278 static void
279 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
280 {
281 uint32_t i, j, size;
282 StgWord bitmap;
283 StgClosure **p;
284
285 size = (uint32_t)large_srt->l.size;
286 p = (StgClosure **)large_srt->srt;
287
288 for (i = 0; i < size / BITS_IN(W_); i++) {
289 bitmap = large_srt->l.bitmap[i];
290 // skip zero words: bitmaps can be very sparse, and this helps
291 // performance a lot in some cases.
292 if (bitmap != 0) {
293 for (j = 0; j < BITS_IN(W_); j++) {
294 if ((bitmap & 1) != 0) {
295 evacuate(p);
296 }
297 p++;
298 bitmap = bitmap >> 1;
299 }
300 } else {
301 p += BITS_IN(W_);
302 }
303 }
304 if (size % BITS_IN(W_) != 0) {
305 bitmap = large_srt->l.bitmap[i];
306 for (j = 0; j < size % BITS_IN(W_); j++) {
307 if ((bitmap & 1) != 0) {
308 evacuate(p);
309 }
310 p++;
311 bitmap = bitmap >> 1;
312 }
313 }
314 }
315
316 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
317 * srt field in the info table. That's ok, because we'll
318 * never dereference it.
319 */
320 STATIC_INLINE GNUC_ATTR_HOT void
321 scavenge_srt (StgClosure **srt, uint32_t srt_bitmap)
322 {
323 uint32_t bitmap;
324 StgClosure **p;
325
326 bitmap = srt_bitmap;
327 p = srt;
328
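    // An srt_bitmap of all ones is the sentinel for a large SRT: in that
    // case srt points to an StgLargeSRT carrying its own size and bitmap,
    // handled by scavenge_large_srt_bitmap() above.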
329 if (bitmap == (StgHalfWord)(-1)) {
330 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
331 return;
332 }
333
334 while (bitmap != 0) {
335 if ((bitmap & 1) != 0) {
336 #if defined(COMPILING_WINDOWS_DLL)
337 // Special-case to handle references to closures hiding out in DLLs, since
338             // double indirections are required to get at those.  The code generator knows
339 // which is which when generating the SRT, so it stores the (indirect)
340 // reference to the DLL closure in the table by first adding one to it.
341 // We check for this here, and undo the addition before evacuating it.
342 //
343 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
344 // closure that's fixed at link-time, and no extra magic is required.
345 if ( (W_)(*srt) & 0x1 ) {
346 evacuate( (StgClosure**) ((W_) (*srt) & ~0x1));
347 } else {
348 evacuate(p);
349 }
350 #else
351 evacuate(p);
352 #endif
353 }
354 p++;
355 bitmap = bitmap >> 1;
356 }
357 }
358
359
360 STATIC_INLINE GNUC_ATTR_HOT void
361 scavenge_thunk_srt(const StgInfoTable *info)
362 {
363 StgThunkInfoTable *thunk_info;
364 uint32_t bitmap;
365
366 if (!major_gc) return;
367
368 thunk_info = itbl_to_thunk_itbl(info);
369 bitmap = thunk_info->i.srt_bitmap;
370 if (bitmap) {
371 // don't read srt_offset if bitmap==0, because it doesn't exist
372 // and so the memory might not be readable.
373 scavenge_srt((StgClosure **)GET_SRT(thunk_info), bitmap);
374 }
375 }
376
377 STATIC_INLINE GNUC_ATTR_HOT void
378 scavenge_fun_srt(const StgInfoTable *info)
379 {
380 StgFunInfoTable *fun_info;
381 uint32_t bitmap;
382
383 if (!major_gc) return;
384
385 fun_info = itbl_to_fun_itbl(info);
386 bitmap = fun_info->i.srt_bitmap;
387 if (bitmap) {
388 // don't read srt_offset if bitmap==0, because it doesn't exist
389 // and so the memory might not be readable.
390 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), bitmap);
391 }
392 }
393
394 /* -----------------------------------------------------------------------------
395 Scavenge a block from the given scan pointer up to bd->free.
396
397    evac_gen_no is set by the caller to be either zero (for a
398    generation < N) or G, where G is the generation of the block being
399    scavenged.
400
401 We sometimes temporarily change evac_gen_no back to zero if we're
402 scavenging a mutable object where eager promotion isn't such a good
403 idea.
404 -------------------------------------------------------------------------- */
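// Invariant: bd->u.scan records how far this block has already been
// scavenged; everything between bd->u.scan and bd->free still needs to be
// scanned (see the updates to bd->u.scan at the end of this function).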
405
406 static GNUC_ATTR_HOT void
407 scavenge_block (bdescr *bd)
408 {
409 StgPtr p, q;
410 const StgInfoTable *info;
411 rtsBool saved_eager_promotion;
412 gen_workspace *ws;
413
414 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
415 bd->start, bd->gen_no, bd->u.scan);
416
417 gct->scan_bd = bd;
418 gct->evac_gen_no = bd->gen_no;
419 saved_eager_promotion = gct->eager_promotion;
420 gct->failed_to_evac = rtsFalse;
421
422 ws = &gct->gens[bd->gen->no];
423
424 p = bd->u.scan;
425
426 // we might be evacuating into the very object that we're
427 // scavenging, so we have to check the real bd->free pointer each
428 // time around the loop.
429 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
430
431 ASSERT(bd->link == NULL);
432 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
433 info = get_itbl((StgClosure *)p);
434
435 ASSERT(gct->thunk_selector_depth == 0);
436
437 q = p;
438 switch (info->type) {
439
440 case MVAR_CLEAN:
441 case MVAR_DIRTY:
442 {
443 StgMVar *mvar = ((StgMVar *)p);
444 gct->eager_promotion = rtsFalse;
445 evacuate((StgClosure **)&mvar->head);
446 evacuate((StgClosure **)&mvar->tail);
447 evacuate((StgClosure **)&mvar->value);
448 gct->eager_promotion = saved_eager_promotion;
449
450 if (gct->failed_to_evac) {
451 mvar->header.info = &stg_MVAR_DIRTY_info;
452 } else {
453 mvar->header.info = &stg_MVAR_CLEAN_info;
454 }
455 p += sizeofW(StgMVar);
456 break;
457 }
458
459 case TVAR:
460 {
461 StgTVar *tvar = ((StgTVar *)p);
462 gct->eager_promotion = rtsFalse;
463 evacuate((StgClosure **)&tvar->current_value);
464 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
465 gct->eager_promotion = saved_eager_promotion;
466
467 if (gct->failed_to_evac) {
468 tvar->header.info = &stg_TVAR_DIRTY_info;
469 } else {
470 tvar->header.info = &stg_TVAR_CLEAN_info;
471 }
472 p += sizeofW(StgTVar);
473 break;
474 }
475
476 case FUN_2_0:
477 scavenge_fun_srt(info);
478 evacuate(&((StgClosure *)p)->payload[1]);
479 evacuate(&((StgClosure *)p)->payload[0]);
480 p += sizeofW(StgHeader) + 2;
481 break;
482
483 case THUNK_2_0:
484 scavenge_thunk_srt(info);
485 evacuate(&((StgThunk *)p)->payload[1]);
486 evacuate(&((StgThunk *)p)->payload[0]);
487 p += sizeofW(StgThunk) + 2;
488 break;
489
490 case CONSTR_2_0:
491 evacuate(&((StgClosure *)p)->payload[1]);
492 evacuate(&((StgClosure *)p)->payload[0]);
493 p += sizeofW(StgHeader) + 2;
494 break;
495
496 case THUNK_1_0:
497 scavenge_thunk_srt(info);
498 evacuate(&((StgThunk *)p)->payload[0]);
499 p += sizeofW(StgThunk) + 1;
500 break;
501
502 case FUN_1_0:
503 scavenge_fun_srt(info);
504 case CONSTR_1_0:
505 evacuate(&((StgClosure *)p)->payload[0]);
506 p += sizeofW(StgHeader) + 1;
507 break;
508
509 case THUNK_0_1:
510 scavenge_thunk_srt(info);
511 p += sizeofW(StgThunk) + 1;
512 break;
513
514 case FUN_0_1:
515 scavenge_fun_srt(info);
516 case CONSTR_0_1:
517 p += sizeofW(StgHeader) + 1;
518 break;
519
520 case THUNK_0_2:
521 scavenge_thunk_srt(info);
522 p += sizeofW(StgThunk) + 2;
523 break;
524
525 case FUN_0_2:
526 scavenge_fun_srt(info);
527 case CONSTR_0_2:
528 p += sizeofW(StgHeader) + 2;
529 break;
530
531 case THUNK_1_1:
532 scavenge_thunk_srt(info);
533 evacuate(&((StgThunk *)p)->payload[0]);
534 p += sizeofW(StgThunk) + 2;
535 break;
536
537 case FUN_1_1:
538 scavenge_fun_srt(info);
539 case CONSTR_1_1:
540 evacuate(&((StgClosure *)p)->payload[0]);
541 p += sizeofW(StgHeader) + 2;
542 break;
543
544 case FUN:
545 scavenge_fun_srt(info);
546 goto gen_obj;
547
548 case THUNK:
549 {
550 StgPtr end;
551
552 scavenge_thunk_srt(info);
553 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
554 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
555 evacuate((StgClosure **)p);
556 }
557 p += info->layout.payload.nptrs;
558 break;
559 }
560
561 gen_obj:
562 case CONSTR:
563 case WEAK:
564 case PRIM:
565 {
566 StgPtr end;
567
568 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
569 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
570 evacuate((StgClosure **)p);
571 }
572 p += info->layout.payload.nptrs;
573 break;
574 }
575
576 case BCO: {
577 StgBCO *bco = (StgBCO *)p;
578 evacuate((StgClosure **)&bco->instrs);
579 evacuate((StgClosure **)&bco->literals);
580 evacuate((StgClosure **)&bco->ptrs);
581 p += bco_sizeW(bco);
582 break;
583 }
584
585 case BLACKHOLE:
586 evacuate(&((StgInd *)p)->indirectee);
587 p += sizeofW(StgInd);
588 break;
589
590 case MUT_VAR_CLEAN:
591 case MUT_VAR_DIRTY:
592 gct->eager_promotion = rtsFalse;
593 evacuate(&((StgMutVar *)p)->var);
594 gct->eager_promotion = saved_eager_promotion;
595
596 if (gct->failed_to_evac) {
597 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
598 } else {
599 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
600 }
601 p += sizeofW(StgMutVar);
602 break;
603
604 case BLOCKING_QUEUE:
605 {
606 StgBlockingQueue *bq = (StgBlockingQueue *)p;
607
608 gct->eager_promotion = rtsFalse;
609 evacuate(&bq->bh);
610 evacuate((StgClosure**)&bq->owner);
611 evacuate((StgClosure**)&bq->queue);
612 evacuate((StgClosure**)&bq->link);
613 gct->eager_promotion = saved_eager_promotion;
614
615 if (gct->failed_to_evac) {
616 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
617 } else {
618 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
619 }
620 p += sizeofW(StgBlockingQueue);
621 break;
622 }
623
624 case THUNK_SELECTOR:
625 {
626 StgSelector *s = (StgSelector *)p;
627 evacuate(&s->selectee);
628 p += THUNK_SELECTOR_sizeW();
629 break;
630 }
631
632 // A chunk of stack saved in a heap object
633 case AP_STACK:
634 {
635 StgAP_STACK *ap = (StgAP_STACK *)p;
636
637 evacuate(&ap->fun);
638 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
639 p = (StgPtr)ap->payload + ap->size;
640 break;
641 }
642
643 case PAP:
644 p = scavenge_PAP((StgPAP *)p);
645 break;
646
647 case AP:
648 p = scavenge_AP((StgAP *)p);
649 break;
650
651 case ARR_WORDS:
652 // nothing to follow
653 p += arr_words_sizeW((StgArrBytes *)p);
654 break;
655
656 case MUT_ARR_PTRS_CLEAN:
657 case MUT_ARR_PTRS_DIRTY:
658 {
659 // We don't eagerly promote objects pointed to by a mutable
660 // array, but if we find the array only points to objects in
661 // the same or an older generation, we mark it "clean" and
662 // avoid traversing it during minor GCs.
663 gct->eager_promotion = rtsFalse;
664
665 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
666
667 if (gct->failed_to_evac) {
668 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
669 } else {
670 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
671 }
672
673 gct->eager_promotion = saved_eager_promotion;
674 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
675 break;
676 }
677
678 case MUT_ARR_PTRS_FROZEN:
679 case MUT_ARR_PTRS_FROZEN0:
680 // follow everything
681 {
682 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
683
684 // If we're going to put this object on the mutable list, then
685 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
686 if (gct->failed_to_evac) {
687 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
688 } else {
689 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
690 }
691 break;
692 }
693
694 case SMALL_MUT_ARR_PTRS_CLEAN:
695 case SMALL_MUT_ARR_PTRS_DIRTY:
696 // follow everything
697 {
698 StgPtr next;
699
700 // We don't eagerly promote objects pointed to by a mutable
701 // array, but if we find the array only points to objects in
702 // the same or an older generation, we mark it "clean" and
703 // avoid traversing it during minor GCs.
704 gct->eager_promotion = rtsFalse;
705 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
706 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
707 evacuate((StgClosure **)p);
708 }
709 gct->eager_promotion = saved_eager_promotion;
710
711 if (gct->failed_to_evac) {
712 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
713 } else {
714 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
715 }
716
717 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
718 break;
719 }
720
721 case SMALL_MUT_ARR_PTRS_FROZEN:
722 case SMALL_MUT_ARR_PTRS_FROZEN0:
723 // follow everything
724 {
725 StgPtr next;
726
727 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
728 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
729 evacuate((StgClosure **)p);
730 }
731
732 // If we're going to put this object on the mutable list, then
733 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
734 if (gct->failed_to_evac) {
735 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
736 } else {
737 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
738 }
739 break;
740 }
741
742 case TSO:
743 {
744 scavengeTSO((StgTSO *)p);
745 p += sizeofW(StgTSO);
746 break;
747 }
748
749 case STACK:
750 {
751 StgStack *stack = (StgStack*)p;
752
753 gct->eager_promotion = rtsFalse;
754
755 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
756 stack->dirty = gct->failed_to_evac;
757 p += stack_sizeW(stack);
758
759 gct->eager_promotion = saved_eager_promotion;
760 break;
761 }
762
763 case MUT_PRIM:
764 {
765 StgPtr end;
766
767 gct->eager_promotion = rtsFalse;
768
769 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
770 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
771 evacuate((StgClosure **)p);
772 }
773 p += info->layout.payload.nptrs;
774
775 gct->eager_promotion = saved_eager_promotion;
776 gct->failed_to_evac = rtsTrue; // mutable
777 break;
778 }
779
780 case TREC_CHUNK:
781 {
782 StgWord i;
783 StgTRecChunk *tc = ((StgTRecChunk *) p);
784 TRecEntry *e = &(tc -> entries[0]);
785 gct->eager_promotion = rtsFalse;
786 evacuate((StgClosure **)&tc->prev_chunk);
787 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
788 evacuate((StgClosure **)&e->tvar);
789 evacuate((StgClosure **)&e->expected_value);
790 evacuate((StgClosure **)&e->new_value);
791 }
792 gct->eager_promotion = saved_eager_promotion;
793 gct->failed_to_evac = rtsTrue; // mutable
794 p += sizeofW(StgTRecChunk);
795 break;
796 }
797
798 default:
799 barf("scavenge: unimplemented/strange closure type %d @ %p",
800 info->type, p);
801 }
802
803 /*
804 * We need to record the current object on the mutable list if
805 * (a) It is actually mutable, or
806 * (b) It contains pointers to a younger generation.
807 * Case (b) arises if we didn't manage to promote everything that
808 * the current object points to into the current generation.
809 */
810 if (gct->failed_to_evac) {
811 gct->failed_to_evac = rtsFalse;
812 if (bd->gen_no > 0) {
813 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
814 }
815 }
816 }
817
818 if (p > bd->free) {
819 gct->copied += ws->todo_free - bd->free;
820 bd->free = p;
821 }
822
823 debugTrace(DEBUG_gc, " scavenged %ld bytes",
824 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
825
826 // update stats: this is a block that has been scavenged
827 gct->scanned += bd->free - bd->u.scan;
828 bd->u.scan = bd->free;
829
830 if (bd != ws->todo_bd) {
831 // we're not going to evac any more objects into
832 // this block, so push it now.
833 push_scanned_block(bd, ws);
834 }
835
836 gct->scan_bd = NULL;
837 }
838 /* -----------------------------------------------------------------------------
839 Scavenge everything on the mark stack.
840
841    This is slightly different from scavenge_block():
842 - we don't walk linearly through the objects, so the scavenger
843 doesn't need to advance the pointer on to the next object.
844 -------------------------------------------------------------------------- */
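// This is only invoked from scavenge_loop() when the mark stack is
// non-empty, i.e. when a compacted generation is being collected (see
// "scavenge objects in compacted generation" below).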
845
846 static void
847 scavenge_mark_stack(void)
848 {
849 StgPtr p, q;
850 const StgInfoTable *info;
851 rtsBool saved_eager_promotion;
852
853 gct->evac_gen_no = oldest_gen->no;
854 saved_eager_promotion = gct->eager_promotion;
855
856 while ((p = pop_mark_stack())) {
857
858 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
859 info = get_itbl((StgClosure *)p);
860
861 q = p;
862 switch (info->type) {
863
864 case MVAR_CLEAN:
865 case MVAR_DIRTY:
866 {
867 StgMVar *mvar = ((StgMVar *)p);
868 gct->eager_promotion = rtsFalse;
869 evacuate((StgClosure **)&mvar->head);
870 evacuate((StgClosure **)&mvar->tail);
871 evacuate((StgClosure **)&mvar->value);
872 gct->eager_promotion = saved_eager_promotion;
873
874 if (gct->failed_to_evac) {
875 mvar->header.info = &stg_MVAR_DIRTY_info;
876 } else {
877 mvar->header.info = &stg_MVAR_CLEAN_info;
878 }
879 break;
880 }
881
882 case TVAR:
883 {
884 StgTVar *tvar = ((StgTVar *)p);
885 gct->eager_promotion = rtsFalse;
886 evacuate((StgClosure **)&tvar->current_value);
887 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
888 gct->eager_promotion = saved_eager_promotion;
889
890 if (gct->failed_to_evac) {
891 tvar->header.info = &stg_TVAR_DIRTY_info;
892 } else {
893 tvar->header.info = &stg_TVAR_CLEAN_info;
894 }
895 break;
896 }
897
898 case FUN_2_0:
899 scavenge_fun_srt(info);
900 evacuate(&((StgClosure *)p)->payload[1]);
901 evacuate(&((StgClosure *)p)->payload[0]);
902 break;
903
904 case THUNK_2_0:
905 scavenge_thunk_srt(info);
906 evacuate(&((StgThunk *)p)->payload[1]);
907 evacuate(&((StgThunk *)p)->payload[0]);
908 break;
909
910 case CONSTR_2_0:
911 evacuate(&((StgClosure *)p)->payload[1]);
912 evacuate(&((StgClosure *)p)->payload[0]);
913 break;
914
915 case FUN_1_0:
916 case FUN_1_1:
917 scavenge_fun_srt(info);
918 evacuate(&((StgClosure *)p)->payload[0]);
919 break;
920
921 case THUNK_1_0:
922 case THUNK_1_1:
923 scavenge_thunk_srt(info);
924 evacuate(&((StgThunk *)p)->payload[0]);
925 break;
926
927 case CONSTR_1_0:
928 case CONSTR_1_1:
929 evacuate(&((StgClosure *)p)->payload[0]);
930 break;
931
932 case FUN_0_1:
933 case FUN_0_2:
934 scavenge_fun_srt(info);
935 break;
936
937 case THUNK_0_1:
938 case THUNK_0_2:
939 scavenge_thunk_srt(info);
940 break;
941
942 case CONSTR_0_1:
943 case CONSTR_0_2:
944 break;
945
946 case FUN:
947 scavenge_fun_srt(info);
948 goto gen_obj;
949
950 case THUNK:
951 {
952 StgPtr end;
953
954 scavenge_thunk_srt(info);
955 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
956 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
957 evacuate((StgClosure **)p);
958 }
959 break;
960 }
961
962 gen_obj:
963 case CONSTR:
964 case WEAK:
965 case PRIM:
966 {
967 StgPtr end;
968
969 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
970 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
971 evacuate((StgClosure **)p);
972 }
973 break;
974 }
975
976 case BCO: {
977 StgBCO *bco = (StgBCO *)p;
978 evacuate((StgClosure **)&bco->instrs);
979 evacuate((StgClosure **)&bco->literals);
980 evacuate((StgClosure **)&bco->ptrs);
981 break;
982 }
983
984 case IND:
985 case BLACKHOLE:
986 evacuate(&((StgInd *)p)->indirectee);
987 break;
988
989 case MUT_VAR_CLEAN:
990 case MUT_VAR_DIRTY: {
991 gct->eager_promotion = rtsFalse;
992 evacuate(&((StgMutVar *)p)->var);
993 gct->eager_promotion = saved_eager_promotion;
994
995 if (gct->failed_to_evac) {
996 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
997 } else {
998 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
999 }
1000 break;
1001 }
1002
1003 case BLOCKING_QUEUE:
1004 {
1005 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1006
1007 gct->eager_promotion = rtsFalse;
1008 evacuate(&bq->bh);
1009 evacuate((StgClosure**)&bq->owner);
1010 evacuate((StgClosure**)&bq->queue);
1011 evacuate((StgClosure**)&bq->link);
1012 gct->eager_promotion = saved_eager_promotion;
1013
1014 if (gct->failed_to_evac) {
1015 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1016 } else {
1017 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1018 }
1019 break;
1020 }
1021
1022 case ARR_WORDS:
1023 break;
1024
1025 case THUNK_SELECTOR:
1026 {
1027 StgSelector *s = (StgSelector *)p;
1028 evacuate(&s->selectee);
1029 break;
1030 }
1031
1032 // A chunk of stack saved in a heap object
1033 case AP_STACK:
1034 {
1035 StgAP_STACK *ap = (StgAP_STACK *)p;
1036
1037 evacuate(&ap->fun);
1038 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1039 break;
1040 }
1041
1042 case PAP:
1043 scavenge_PAP((StgPAP *)p);
1044 break;
1045
1046 case AP:
1047 scavenge_AP((StgAP *)p);
1048 break;
1049
1050 case MUT_ARR_PTRS_CLEAN:
1051 case MUT_ARR_PTRS_DIRTY:
1052 // follow everything
1053 {
1054 // We don't eagerly promote objects pointed to by a mutable
1055 // array, but if we find the array only points to objects in
1056 // the same or an older generation, we mark it "clean" and
1057 // avoid traversing it during minor GCs.
1058 gct->eager_promotion = rtsFalse;
1059
1060 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1061
1062 if (gct->failed_to_evac) {
1063 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1064 } else {
1065 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1066 }
1067
1068 gct->eager_promotion = saved_eager_promotion;
1069 gct->failed_to_evac = rtsTrue; // mutable anyhow.
1070 break;
1071 }
1072
1073 case MUT_ARR_PTRS_FROZEN:
1074 case MUT_ARR_PTRS_FROZEN0:
1075 // follow everything
1076 {
1077 StgPtr q = p;
1078
1079 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1080
1081 // If we're going to put this object on the mutable list, then
1082 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1083 if (gct->failed_to_evac) {
1084 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1085 } else {
1086 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1087 }
1088 break;
1089 }
1090
1091 case SMALL_MUT_ARR_PTRS_CLEAN:
1092 case SMALL_MUT_ARR_PTRS_DIRTY:
1093 // follow everything
1094 {
1095 StgPtr next;
1096 rtsBool saved_eager;
1097
1098 // We don't eagerly promote objects pointed to by a mutable
1099 // array, but if we find the array only points to objects in
1100 // the same or an older generation, we mark it "clean" and
1101 // avoid traversing it during minor GCs.
1102 saved_eager = gct->eager_promotion;
1103 gct->eager_promotion = rtsFalse;
1104 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1105 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1106 evacuate((StgClosure **)p);
1107 }
1108 gct->eager_promotion = saved_eager;
1109
1110 if (gct->failed_to_evac) {
1111 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1112 } else {
1113 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1114 }
1115
1116 gct->failed_to_evac = rtsTrue; // mutable anyhow.
1117 break;
1118 }
1119
1120 case SMALL_MUT_ARR_PTRS_FROZEN:
1121 case SMALL_MUT_ARR_PTRS_FROZEN0:
1122 // follow everything
1123 {
1124 StgPtr next, q = p;
1125
1126 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1127 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1128 evacuate((StgClosure **)p);
1129 }
1130
1131 // If we're going to put this object on the mutable list, then
1132 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1133 if (gct->failed_to_evac) {
1134 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1135 } else {
1136 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1137 }
1138 break;
1139 }
1140
1141 case TSO:
1142 {
1143 scavengeTSO((StgTSO*)p);
1144 break;
1145 }
1146
1147 case STACK:
1148 {
1149 StgStack *stack = (StgStack*)p;
1150
1151 gct->eager_promotion = rtsFalse;
1152
1153 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1154 stack->dirty = gct->failed_to_evac;
1155
1156 gct->eager_promotion = saved_eager_promotion;
1157 break;
1158 }
1159
1160 case MUT_PRIM:
1161 {
1162 StgPtr end;
1163
1164 gct->eager_promotion = rtsFalse;
1165
1166 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1167 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1168 evacuate((StgClosure **)p);
1169 }
1170
1171 gct->eager_promotion = saved_eager_promotion;
1172 gct->failed_to_evac = rtsTrue; // mutable
1173 break;
1174 }
1175
1176 case TREC_CHUNK:
1177 {
1178 StgWord i;
1179 StgTRecChunk *tc = ((StgTRecChunk *) p);
1180 TRecEntry *e = &(tc -> entries[0]);
1181 gct->eager_promotion = rtsFalse;
1182 evacuate((StgClosure **)&tc->prev_chunk);
1183 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1184 evacuate((StgClosure **)&e->tvar);
1185 evacuate((StgClosure **)&e->expected_value);
1186 evacuate((StgClosure **)&e->new_value);
1187 }
1188 gct->eager_promotion = saved_eager_promotion;
1189 gct->failed_to_evac = rtsTrue; // mutable
1190 break;
1191 }
1192
1193 default:
1194 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1195 info->type, p);
1196 }
1197
1198 if (gct->failed_to_evac) {
1199 gct->failed_to_evac = rtsFalse;
1200 if (gct->evac_gen_no) {
1201 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1202 }
1203 }
1204 } // while (p = pop_mark_stack())
1205 }
1206
1207 /* -----------------------------------------------------------------------------
1208 Scavenge one object.
1209
1210 This is used for objects that are temporarily marked as mutable
1211 because they contain old-to-new generation pointers. Only certain
1212 objects can have this property.
1213 -------------------------------------------------------------------------- */
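// Returns rtsTrue ("no luck") if something the object points to could not
// be promoted, i.e. the caller should keep the object on the mutable list.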
1214
1215 static rtsBool
1216 scavenge_one(StgPtr p)
1217 {
1218 const StgInfoTable *info;
1219 rtsBool no_luck;
1220 rtsBool saved_eager_promotion;
1221
1222 saved_eager_promotion = gct->eager_promotion;
1223
1224 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1225 info = get_itbl((StgClosure *)p);
1226
1227 switch (info->type) {
1228
1229 case MVAR_CLEAN:
1230 case MVAR_DIRTY:
1231 {
1232 StgMVar *mvar = ((StgMVar *)p);
1233 gct->eager_promotion = rtsFalse;
1234 evacuate((StgClosure **)&mvar->head);
1235 evacuate((StgClosure **)&mvar->tail);
1236 evacuate((StgClosure **)&mvar->value);
1237 gct->eager_promotion = saved_eager_promotion;
1238
1239 if (gct->failed_to_evac) {
1240 mvar->header.info = &stg_MVAR_DIRTY_info;
1241 } else {
1242 mvar->header.info = &stg_MVAR_CLEAN_info;
1243 }
1244 break;
1245 }
1246
1247 case TVAR:
1248 {
1249 StgTVar *tvar = ((StgTVar *)p);
1250 gct->eager_promotion = rtsFalse;
1251 evacuate((StgClosure **)&tvar->current_value);
1252 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1253 gct->eager_promotion = saved_eager_promotion;
1254
1255 if (gct->failed_to_evac) {
1256 tvar->header.info = &stg_TVAR_DIRTY_info;
1257 } else {
1258 tvar->header.info = &stg_TVAR_CLEAN_info;
1259 }
1260 break;
1261 }
1262
1263 case THUNK:
1264 case THUNK_1_0:
1265 case THUNK_0_1:
1266 case THUNK_1_1:
1267 case THUNK_0_2:
1268 case THUNK_2_0:
1269 {
1270 StgPtr q, end;
1271
1272 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1273 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1274 evacuate((StgClosure **)q);
1275 }
1276 break;
1277 }
1278
1279 case FUN:
1280 case FUN_1_0: // hardly worth specialising these guys
1281 case FUN_0_1:
1282 case FUN_1_1:
1283 case FUN_0_2:
1284 case FUN_2_0:
1285 case CONSTR:
1286 case CONSTR_1_0:
1287 case CONSTR_0_1:
1288 case CONSTR_1_1:
1289 case CONSTR_0_2:
1290 case CONSTR_2_0:
1291 case PRIM:
1292 {
1293 StgPtr q, end;
1294
1295 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1296 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1297 evacuate((StgClosure **)q);
1298 }
1299 break;
1300 }
1301
1302 case WEAK:
1303 // This WEAK object will not be considered by tidyWeakList during this
1304 // collection because it is in a generation >= N, but it is on the
1305 // mutable list so we must evacuate all of its pointers because some
1306 // of them may point into a younger generation.
1307 scavengeLiveWeak((StgWeak *)p);
1308 break;
1309
1310 case MUT_VAR_CLEAN:
1311 case MUT_VAR_DIRTY: {
1312 StgPtr q = p;
1313
1314 gct->eager_promotion = rtsFalse;
1315 evacuate(&((StgMutVar *)p)->var);
1316 gct->eager_promotion = saved_eager_promotion;
1317
1318 if (gct->failed_to_evac) {
1319 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1320 } else {
1321 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1322 }
1323 break;
1324 }
1325
1326 case BLOCKING_QUEUE:
1327 {
1328 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1329
1330 gct->eager_promotion = rtsFalse;
1331 evacuate(&bq->bh);
1332 evacuate((StgClosure**)&bq->owner);
1333 evacuate((StgClosure**)&bq->queue);
1334 evacuate((StgClosure**)&bq->link);
1335 gct->eager_promotion = saved_eager_promotion;
1336
1337 if (gct->failed_to_evac) {
1338 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1339 } else {
1340 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1341 }
1342 break;
1343 }
1344
1345 case THUNK_SELECTOR:
1346 {
1347 StgSelector *s = (StgSelector *)p;
1348 evacuate(&s->selectee);
1349 break;
1350 }
1351
1352 case AP_STACK:
1353 {
1354 StgAP_STACK *ap = (StgAP_STACK *)p;
1355
1356 evacuate(&ap->fun);
1357 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1358 p = (StgPtr)ap->payload + ap->size;
1359 break;
1360 }
1361
1362 case PAP:
1363 p = scavenge_PAP((StgPAP *)p);
1364 break;
1365
1366 case AP:
1367 p = scavenge_AP((StgAP *)p);
1368 break;
1369
1370 case ARR_WORDS:
1371 // nothing to follow
1372 break;
1373
1374 case MUT_ARR_PTRS_CLEAN:
1375 case MUT_ARR_PTRS_DIRTY:
1376 {
1377 // We don't eagerly promote objects pointed to by a mutable
1378 // array, but if we find the array only points to objects in
1379 // the same or an older generation, we mark it "clean" and
1380 // avoid traversing it during minor GCs.
1381 gct->eager_promotion = rtsFalse;
1382
1383 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1384
1385 if (gct->failed_to_evac) {
1386 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1387 } else {
1388 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1389 }
1390
1391 gct->eager_promotion = saved_eager_promotion;
1392 gct->failed_to_evac = rtsTrue;
1393 break;
1394 }
1395
1396 case MUT_ARR_PTRS_FROZEN:
1397 case MUT_ARR_PTRS_FROZEN0:
1398 {
1399 // follow everything
1400 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1401
1402 // If we're going to put this object on the mutable list, then
1403 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1404 if (gct->failed_to_evac) {
1405 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1406 } else {
1407 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1408 }
1409 break;
1410 }
1411
1412 case SMALL_MUT_ARR_PTRS_CLEAN:
1413 case SMALL_MUT_ARR_PTRS_DIRTY:
1414 {
1415 StgPtr next, q;
1416 rtsBool saved_eager;
1417
1418 // We don't eagerly promote objects pointed to by a mutable
1419 // array, but if we find the array only points to objects in
1420 // the same or an older generation, we mark it "clean" and
1421 // avoid traversing it during minor GCs.
1422 saved_eager = gct->eager_promotion;
1423 gct->eager_promotion = rtsFalse;
1424 q = p;
1425 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1426 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1427 evacuate((StgClosure **)p);
1428 }
1429 gct->eager_promotion = saved_eager;
1430
1431 if (gct->failed_to_evac) {
1432 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1433 } else {
1434 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1435 }
1436
1437 gct->failed_to_evac = rtsTrue;
1438 break;
1439 }
1440
1441 case SMALL_MUT_ARR_PTRS_FROZEN:
1442 case SMALL_MUT_ARR_PTRS_FROZEN0:
1443 {
1444 // follow everything
1445 StgPtr next, q=p;
1446
1447 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1448 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1449 evacuate((StgClosure **)p);
1450 }
1451
1452 // If we're going to put this object on the mutable list, then
1453 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1454 if (gct->failed_to_evac) {
1455 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1456 } else {
1457 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1458 }
1459 break;
1460 }
1461
1462 case TSO:
1463 {
1464 scavengeTSO((StgTSO*)p);
1465 break;
1466 }
1467
1468 case STACK:
1469 {
1470 StgStack *stack = (StgStack*)p;
1471
1472 gct->eager_promotion = rtsFalse;
1473
1474 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1475 stack->dirty = gct->failed_to_evac;
1476
1477 gct->eager_promotion = saved_eager_promotion;
1478 break;
1479 }
1480
1481 case MUT_PRIM:
1482 {
1483 StgPtr end;
1484
1485 gct->eager_promotion = rtsFalse;
1486
1487 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1488 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1489 evacuate((StgClosure **)p);
1490 }
1491
1492 gct->eager_promotion = saved_eager_promotion;
1493 gct->failed_to_evac = rtsTrue; // mutable
1494 break;
1495
1496 }
1497
1498 case TREC_CHUNK:
1499 {
1500 StgWord i;
1501 StgTRecChunk *tc = ((StgTRecChunk *) p);
1502 TRecEntry *e = &(tc -> entries[0]);
1503 gct->eager_promotion = rtsFalse;
1504 evacuate((StgClosure **)&tc->prev_chunk);
1505 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1506 evacuate((StgClosure **)&e->tvar);
1507 evacuate((StgClosure **)&e->expected_value);
1508 evacuate((StgClosure **)&e->new_value);
1509 }
1510 gct->eager_promotion = saved_eager_promotion;
1511 gct->failed_to_evac = rtsTrue; // mutable
1512 break;
1513 }
1514
1515 case IND:
1516 // IND can happen, for example, when the interpreter allocates
1517 // a gigantic AP closure (more than one block), which ends up
1518 // on the large-object list and then gets updated. See #3424.
1519 case BLACKHOLE:
1520 case IND_STATIC:
1521 evacuate(&((StgInd *)p)->indirectee);
1522
1523 #if 0 && defined(DEBUG)
1524 if (RtsFlags.DebugFlags.gc)
1525 /* Debugging code to print out the size of the thing we just
1526 * promoted
1527 */
1528 {
1529 StgPtr start = gen->scan;
1530 bdescr *start_bd = gen->scan_bd;
1531 StgWord size = 0;
1532 scavenge(&gen);
1533 if (start_bd != gen->scan_bd) {
1534 size += (P_)BLOCK_ROUND_UP(start) - start;
1535 start_bd = start_bd->link;
1536 while (start_bd != gen->scan_bd) {
1537 size += BLOCK_SIZE_W;
1538 start_bd = start_bd->link;
1539 }
1540 size += gen->scan -
1541 (P_)BLOCK_ROUND_DOWN(gen->scan);
1542 } else {
1543 size = gen->scan - start;
1544 }
1545 debugBelch("evac IND: %ld bytes", size * sizeof(W_));
1546 }
1547 #endif
1548 break;
1549
1550 default:
1551 barf("scavenge_one: strange object %d", (int)(info->type));
1552 }
1553
1554 no_luck = gct->failed_to_evac;
1555 gct->failed_to_evac = rtsFalse;
1556 return (no_luck);
1557 }
1558
1559 /* -----------------------------------------------------------------------------
1560 Scavenging mutable lists.
1561
1562 We treat the mutable list of each generation > N (i.e. all the
1563 generations older than the one being collected) as roots. We also
1564 remove non-mutable objects from the mutable list at this point.
1565 -------------------------------------------------------------------------- */
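// Each generation's mutable list is a chain of blocks whose payloads are
// simply arrays of pointers to (potentially) mutable closures; we walk
// every entry between bd->start and bd->free below.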
1566
1567 static void
1568 scavenge_mutable_list(bdescr *bd, generation *gen)
1569 {
1570 StgPtr p, q;
1571 uint32_t gen_no;
1572
1573 gen_no = gen->no;
1574 gct->evac_gen_no = gen_no;
1575 for (; bd != NULL; bd = bd->link) {
1576 for (q = bd->start; q < bd->free; q++) {
1577 p = (StgPtr)*q;
1578 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1579
1580 #ifdef DEBUG
1581 switch (get_itbl((StgClosure *)p)->type) {
1582 case MUT_VAR_CLEAN:
1583 // can happen due to concurrent writeMutVars
1584 case MUT_VAR_DIRTY:
1585 mutlist_MUTVARS++; break;
1586 case MUT_ARR_PTRS_CLEAN:
1587 case MUT_ARR_PTRS_DIRTY:
1588 case MUT_ARR_PTRS_FROZEN:
1589 case MUT_ARR_PTRS_FROZEN0:
1590 mutlist_MUTARRS++; break;
1591 case MVAR_CLEAN:
1592 barf("MVAR_CLEAN on mutable list");
1593 case MVAR_DIRTY:
1594 mutlist_MVARS++; break;
1595 case TVAR:
1596 mutlist_TVAR++; break;
1597 case TREC_CHUNK:
1598 mutlist_TREC_CHUNK++; break;
1599 case MUT_PRIM:
1600 if (((StgClosure*)p)->header.info == &stg_TVAR_WATCH_QUEUE_info)
1601 mutlist_TVAR_WATCH_QUEUE++;
1602 else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
1603 mutlist_TREC_HEADER++;
1604 else if (((StgClosure*)p)->header.info == &stg_ATOMIC_INVARIANT_info)
1605 mutlist_ATOMIC_INVARIANT++;
1606 else if (((StgClosure*)p)->header.info == &stg_INVARIANT_CHECK_QUEUE_info)
1607 mutlist_INVARIANT_CHECK_QUEUE++;
1608 else
1609 mutlist_OTHERS++;
1610 break;
1611 default:
1612 mutlist_OTHERS++; break;
1613 }
1614 #endif
1615
1616             // Check whether this object is "clean", that is, it
1617 // definitely doesn't point into a young generation.
1618 // Clean objects don't need to be scavenged. Some clean
1619 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1620             // list at all; others, such as MUT_ARR_PTRS,
1621 // are always on the mutable list.
1622 //
1623 switch (get_itbl((StgClosure *)p)->type) {
1624 case MUT_ARR_PTRS_CLEAN:
1625 recordMutableGen_GC((StgClosure *)p,gen_no);
1626 continue;
1627 case MUT_ARR_PTRS_DIRTY:
1628 {
1629 rtsBool saved_eager_promotion;
1630 saved_eager_promotion = gct->eager_promotion;
1631 gct->eager_promotion = rtsFalse;
1632
1633 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1634
1635 if (gct->failed_to_evac) {
1636 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1637 } else {
1638 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1639 }
1640
1641 gct->eager_promotion = saved_eager_promotion;
1642 gct->failed_to_evac = rtsFalse;
1643 recordMutableGen_GC((StgClosure *)p,gen_no);
1644 continue;
1645 }
1646 default:
1647 ;
1648 }
1649
1650 if (scavenge_one(p)) {
1651 // didn't manage to promote everything, so put the
1652 // object back on the list.
1653 recordMutableGen_GC((StgClosure *)p,gen_no);
1654 }
1655 }
1656 }
1657 }
1658
1659 void
1660 scavenge_capability_mut_lists (Capability *cap)
1661 {
1662 uint32_t g;
1663
1664 /* Mutable lists from each generation > N
1665 * we want to *scavenge* these roots, not evacuate them: they're not
1666 * going to move in this GC.
1667 * Also do them in reverse generation order, for the usual reason:
1668 * namely to reduce the likelihood of spurious old->new pointers.
1669 */
1670 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1671 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1672 freeChain_sync(cap->saved_mut_lists[g]);
1673 cap->saved_mut_lists[g] = NULL;
1674 }
1675 }
1676
1677 /* -----------------------------------------------------------------------------
1678 Scavenging the static objects.
1679
1680      Static objects reachable from the heap are linked onto
1681      gct->static_objects by evacuate(); here we scavenge each one and
1682      move it onto gct->scavenged_static_objects (major GC only).
1683 -------------------------------------------------------------------------- */
1684
1685 static void
1686 scavenge_static(void)
1687 {
1688 StgClosure *flagged_p, *p;
1689 const StgInfoTable *info;
1690
1691 debugTrace(DEBUG_gc, "scavenging static objects");
1692
1693 /* Always evacuate straight to the oldest generation for static
1694 * objects */
1695 gct->evac_gen_no = oldest_gen->no;
1696
1697 /* keep going until we've scavenged all the objects on the linked
1698 list... */
1699
1700 while (1) {
1701
1702 /* get the next static object from the list. Remember, there might
1703 * be more stuff on this list after each evacuation...
1704          * (gct->static_objects may grow as we scavenge)
1705 */
1706 flagged_p = gct->static_objects;
1707 if (flagged_p == END_OF_STATIC_OBJECT_LIST) {
1708 break;
1709 }
1710 p = UNTAG_STATIC_LIST_PTR(flagged_p);
1711
1712 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1713 info = get_itbl(p);
1714 // make sure the info pointer is into text space
1715
1716 /* Take this object *off* the static_objects list,
1717 * and put it on the scavenged_static_objects list.
1718 */
1719 gct->static_objects = *STATIC_LINK(info,p);
1720 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1721 gct->scavenged_static_objects = flagged_p;
1722
1723 switch (info -> type) {
1724
1725 case IND_STATIC:
1726 {
1727 StgInd *ind = (StgInd *)p;
1728 evacuate(&ind->indirectee);
1729
1730 /* might fail to evacuate it, in which case we have to pop it
1731 * back on the mutable list of the oldest generation. We
1732 * leave it *on* the scavenged_static_objects list, though,
1733 * in case we visit this object again.
1734 */
1735 if (gct->failed_to_evac) {
1736 gct->failed_to_evac = rtsFalse;
1737 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1738 }
1739 break;
1740 }
1741
1742 case THUNK_STATIC:
1743 scavenge_thunk_srt(info);
1744 break;
1745
1746 case FUN_STATIC:
1747 scavenge_fun_srt(info);
1748 break;
1749
1750 case CONSTR_STATIC:
1751 {
1752 StgPtr q, next;
1753
1754 next = (P_)p->payload + info->layout.payload.ptrs;
1755 // evacuate the pointers
1756 for (q = (P_)p->payload; q < next; q++) {
1757 evacuate((StgClosure **)q);
1758 }
1759 break;
1760 }
1761
1762 default:
1763 barf("scavenge_static: strange closure %d", (int)(info->type));
1764 }
1765
1766 ASSERT(gct->failed_to_evac == rtsFalse);
1767 }
1768 }
1769
1770 /* -----------------------------------------------------------------------------
1771 scavenge a chunk of memory described by a bitmap
1772 -------------------------------------------------------------------------- */
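// As in scavenge_small_bitmap(): a clear bit marks a pointer to be
// evacuated, and the bitmap is consumed one word (BITS_IN(W_) bits) at a
// time.  Illustrative sketch (hypothetical helper, not compiled) of how a
// single bit of a large bitmap would be addressed under that layout:
#if 0
static rtsBool large_bitmap_bit (StgLargeBitmap *b, StgWord i)
{
    // bit i lives in word i / BITS_IN(W_), at position i % BITS_IN(W_);
    // a set bit means "non-pointer".
    return ((b->bitmap[i / BITS_IN(W_)] >> (i % BITS_IN(W_))) & 1)
        ? rtsTrue : rtsFalse;
}
#endif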
1773
1774 static void
1775 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
1776 {
1777 uint32_t i, j, b;
1778 StgWord bitmap;
1779
1780 b = 0;
1781
1782 for (i = 0; i < size; b++) {
1783 bitmap = large_bitmap->bitmap[b];
1784 j = stg_min(size-i, BITS_IN(W_));
1785 i += j;
1786 for (; j > 0; j--, p++) {
1787 if ((bitmap & 1) == 0) {
1788 evacuate((StgClosure **)p);
1789 }
1790 bitmap = bitmap >> 1;
1791 }
1792 }
1793 }
1794
1795
1796 /* -----------------------------------------------------------------------------
1797 scavenge_stack walks over a section of stack and evacuates all the
1798 objects pointed to by it. We can use the same code for walking
1799 AP_STACK_UPDs, since these are just sections of copied stack.
1800 -------------------------------------------------------------------------- */
1801
1802 static void
1803 scavenge_stack(StgPtr p, StgPtr stack_end)
1804 {
1805 const StgRetInfoTable* info;
1806 StgWord bitmap;
1807 StgWord size;
1808
1809 /*
1810 * Each time around this loop, we are looking at a chunk of stack
1811 * that starts with an activation record.
1812 */
1813
1814 while (p < stack_end) {
1815 info = get_ret_itbl((StgClosure *)p);
1816
1817 switch (info->i.type) {
1818
1819 case UPDATE_FRAME:
1820 // Note [upd-black-hole]
1821 // In SMP, we can get update frames that point to indirections
1822 // when two threads evaluate the same thunk. We do attempt to
1823 // discover this situation in threadPaused(), but it's
1824 // possible that the following sequence occurs:
1825 //
1826 // A B
1827 // enter T
1828 // enter T
1829 // blackhole T
1830 // update T
1831 // GC
1832 //
1833 // Now T is an indirection, and the update frame is already
1834 // marked on A's stack, so we won't traverse it again in
1835 // threadPaused(). We could traverse the whole stack again
1836 // before GC, but that seems like overkill.
1837 //
1838 // Scavenging this update frame as normal would be disastrous;
1839 // the updatee would end up pointing to the value. So we
1840 // check whether the value after evacuation is a BLACKHOLE,
1841 // and if not, we change the update frame to an stg_enter
1842 // frame that simply returns the value. Hence, blackholing is
1843 // compulsory (otherwise we would have to check for thunks
1844 // too).
1845 //
1846 // One slight hiccup is that the THUNK_SELECTOR machinery can
1847 // overwrite the updatee with an IND. In parallel GC, this
1848 // could even be happening concurrently, so we can't check for
1849 // the IND. Fortunately if we assume that blackholing is
1850 // happening (either lazy or eager), then we can be sure that
1851 // the updatee is never a THUNK_SELECTOR and we're ok.
1852 // NB. this is a new invariant: blackholing is not optional.
1853 {
1854 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1855 StgClosure *v;
1856
1857 evacuate(&frame->updatee);
1858 v = frame->updatee;
1859 if (GET_CLOSURE_TAG(v) != 0 ||
1860 (get_itbl(v)->type != BLACKHOLE)) {
1861 // blackholing is compulsory, see above.
1862 frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
1863 }
1864 ASSERT(v->header.info != &stg_TSO_info);
1865 p += sizeofW(StgUpdateFrame);
1866 continue;
1867 }
1868
1869       // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
1870 case CATCH_STM_FRAME:
1871 case CATCH_RETRY_FRAME:
1872 case ATOMICALLY_FRAME:
1873 case UNDERFLOW_FRAME:
1874 case STOP_FRAME:
1875 case CATCH_FRAME:
1876 case RET_SMALL:
1877 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1878 size = BITMAP_SIZE(info->i.layout.bitmap);
1879 // NOTE: the payload starts immediately after the info-ptr, we
1880 // don't have an StgHeader in the same sense as a heap closure.
1881 p++;
1882 p = scavenge_small_bitmap(p, size, bitmap);
1883
1884 follow_srt:
1885 if (major_gc)
1886 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
1887 continue;
1888
1889 case RET_BCO: {
1890 StgBCO *bco;
1891 StgWord size;
1892
1893 p++;
1894 evacuate((StgClosure **)p);
1895 bco = (StgBCO *)*p;
1896 p++;
1897 size = BCO_BITMAP_SIZE(bco);
1898 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1899 p += size;
1900 continue;
1901 }
1902
1903 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1904 case RET_BIG:
1905 {
1906 StgWord size;
1907
1908 size = GET_LARGE_BITMAP(&info->i)->size;
1909 p++;
1910 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1911 p += size;
1912 // and don't forget to follow the SRT
1913 goto follow_srt;
1914 }
1915
1916 case RET_FUN:
1917 {
1918 StgRetFun *ret_fun = (StgRetFun *)p;
1919 const StgFunInfoTable *fun_info;
1920
1921 evacuate(&ret_fun->fun);
1922 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1923 p = scavenge_arg_block(fun_info, ret_fun->payload);
1924 goto follow_srt;
1925 }
1926
1927 default:
1928 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1929 }
1930 }
1931 }
1932
1933 /*-----------------------------------------------------------------------------
1934 scavenge the large object list.
1935
1936    evac_gen_no is set by the caller; similar games are played with
1937    evac_gen_no as in scavenge_block() - see the comment at the top of
1938    scavenge_block().  Most large objects are (repeatedly) mutable, so
1939    most of the time evac_gen_no will be zero.
1940 --------------------------------------------------------------------------- */
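// Large objects are scavenged in place, one object per block descriptor:
// scavenge_one() is run on bd->start and the block is then moved onto the
// generation's scavenged_large_objects list.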
1941
1942 static void
1943 scavenge_large (gen_workspace *ws)
1944 {
1945 bdescr *bd;
1946 StgPtr p;
1947
1948 gct->evac_gen_no = ws->gen->no;
1949
1950 bd = ws->todo_large_objects;
1951
1952 for (; bd != NULL; bd = ws->todo_large_objects) {
1953
1954 // take this object *off* the large objects list and put it on
1955 // the scavenged large objects list. This is so that we can
1956 // treat new_large_objects as a stack and push new objects on
1957 // the front when evacuating.
1958 ws->todo_large_objects = bd->link;
1959
1960 ACQUIRE_SPIN_LOCK(&ws->gen->sync);
1961 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
1962 ws->gen->n_scavenged_large_blocks += bd->blocks;
1963 RELEASE_SPIN_LOCK(&ws->gen->sync);
1964
1965 p = bd->start;
1966 if (scavenge_one(p)) {
1967 if (ws->gen->no > 0) {
1968 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
1969 }
1970 }
1971
1972 // stats
1973 gct->scanned += closure_sizeW((StgClosure*)p);
1974 }
1975 }
1976
1977 /* ----------------------------------------------------------------------------
1978 Look for work to do.
1979
1980 We look for the oldest gen that has either a todo block that can
1981 be scanned, or a block of work on the global queue that we can
1982 scan.
1983
1984    It is important to take work from the *oldest* generation that
1985    has work available, because that minimizes the likelihood of
1986 evacuating objects into a young generation when they should have
1987 been eagerly promoted. This really does make a difference (the
1988 cacheprof benchmark is one that is affected).
1989
1990 We also want to scan the todo block if possible before grabbing
1991 work from the global queue, the reason being that we don't want to
1992 steal work from the global queue and starve other threads if there
1993 is other work we can usefully be doing.
1994 ------------------------------------------------------------------------- */
1995
1996 static rtsBool
1997 scavenge_find_work (void)
1998 {
1999 int g;
2000 gen_workspace *ws;
2001 rtsBool did_something, did_anything;
2002 bdescr *bd;
2003
2004 gct->scav_find_work++;
2005
2006 did_anything = rtsFalse;
2007
2008 loop:
2009 did_something = rtsFalse;
2010 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2011 ws = &gct->gens[g];
2012
2013 gct->scan_bd = NULL;
2014
2015 // If we have a scan block with some work to do,
2016 // scavenge everything up to the free pointer.
2017 if (ws->todo_bd->u.scan < ws->todo_free)
2018 {
2019 scavenge_block(ws->todo_bd);
2020 did_something = rtsTrue;
2021 break;
2022 }
2023
2024 // If we have any large objects to scavenge, do them now.
2025 if (ws->todo_large_objects) {
2026 scavenge_large(ws);
2027 did_something = rtsTrue;
2028 break;
2029 }
2030
2031 if ((bd = grab_local_todo_block(ws)) != NULL) {
2032 scavenge_block(bd);
2033 did_something = rtsTrue;
2034 break;
2035 }
2036 }
2037
2038 if (did_something) {
2039 did_anything = rtsTrue;
2040 goto loop;
2041 }
2042
2043 #if defined(THREADED_RTS)
2044 if (work_stealing) {
2045 // look for work to steal
2046 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2047 if ((bd = steal_todo_block(g)) != NULL) {
2048 scavenge_block(bd);
2049 did_something = rtsTrue;
2050 break;
2051 }
2052 }
2053
2054 if (did_something) {
2055 did_anything = rtsTrue;
2056 goto loop;
2057 }
2058 }
2059 #endif
2060
2061 // only return when there is no more work to do
2062
2063 return did_anything;
2064 }
2065
2066 /* ----------------------------------------------------------------------------
2067 Scavenge until we can't find anything more to scavenge.
2068 ------------------------------------------------------------------------- */
2069
2070 void
2071 scavenge_loop(void)
2072 {
2073 rtsBool work_to_do;
2074
2075 loop:
2076 work_to_do = rtsFalse;
2077
2078 // scavenge static objects
2079 if (major_gc && gct->static_objects != END_OF_STATIC_OBJECT_LIST) {
2080 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
2081 scavenge_static();
2082 }
2083
2084 // scavenge objects in compacted generation
2085 if (mark_stack_bd != NULL && !mark_stack_empty()) {
2086 scavenge_mark_stack();
2087 work_to_do = rtsTrue;
2088 }
2089
2090 // Order is important here: we want to deal in full blocks as
2091 // much as possible, so go for global work in preference to
2092 // local work. Only if all the global work has been exhausted
2093 // do we start scavenging the fragments of blocks in the local
2094 // workspaces.
2095 if (scavenge_find_work()) goto loop;
2096
2097 if (work_to_do) goto loop;
2098 }