1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "Storage.h"
18 #include "GC.h"
19 #include "GCThread.h"
20 #include "GCUtils.h"
21 #include "Compact.h"
22 #include "MarkStack.h"
23 #include "Evac.h"
24 #include "Scav.h"
25 #include "Apply.h"
26 #include "Trace.h"
27 #include "Sanity.h"
28 #include "Capability.h"
29 #include "LdvProfile.h"
30
31 static void scavenge_stack (StgPtr p, StgPtr stack_end);
32
33 static void scavenge_large_bitmap (StgPtr p,
34 StgLargeBitmap *large_bitmap,
35 nat size );
36
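// NOTE: with THREADED_RTS this file is (as far as I can tell) compiled
// twice: once as-is, producing the single-threaded variants, which the
// macros below rename with a "1" suffix; and once with PARALLEL_GC
// defined (via the small wrapper Scav_thr.c) to produce the parallel
// variants under their original names.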
37 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
38 # define evacuate(a) evacuate1(a)
39 # define scavenge_loop(a) scavenge_loop1(a)
40 # define scavenge_block(a) scavenge_block1(a)
41 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
42 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
43 #endif
44
45 /* -----------------------------------------------------------------------------
46 Scavenge a TSO.
47 -------------------------------------------------------------------------- */
48
49 static void
50 scavengeTSO (StgTSO *tso)
51 {
52 rtsBool saved_eager;
53
54 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
55
56 // update the pointer from the Task.
57 if (tso->bound != NULL) {
58 tso->bound->tso = tso;
59 }
60
61 saved_eager = gct->eager_promotion;
62 gct->eager_promotion = rtsFalse;
63
64 evacuate((StgClosure **)&tso->blocked_exceptions);
65 evacuate((StgClosure **)&tso->bq);
66
67 // scavenge the current transaction record
68 evacuate((StgClosure **)&tso->trec);
69
70 evacuate((StgClosure **)&tso->stackobj);
71
72 evacuate((StgClosure **)&tso->_link);
73 if ( tso->why_blocked == BlockedOnMVar
74 || tso->why_blocked == BlockedOnBlackHole
75 || tso->why_blocked == BlockedOnMsgThrowTo
76 || tso->why_blocked == NotBlocked
77 ) {
78 evacuate(&tso->block_info.closure);
79 }
80 #ifdef THREADED_RTS
81 // in the THREADED_RTS, block_info.closure must always point to a
82 // valid closure, because we assume this in throwTo(). In the
83 // non-threaded RTS it might be an FD (for
84 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
85 else {
86 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
87 }
88 #endif
89
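// record whether anything the TSO points to could not be promoted; if
// so, the TSO stays dirty and will be kept on a mutable list so that
// it is scanned again at the next GC.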
90 tso->dirty = gct->failed_to_evac;
91
92 gct->eager_promotion = saved_eager;
93 }
94
95 /* -----------------------------------------------------------------------------
96 Mutable arrays of pointers
97 -------------------------------------------------------------------------- */
98
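// A MUT_ARR_PTRS carries a card table after its pointer payload: one
// byte per card, each card covering 2^MUT_ARR_PTRS_CARD_BITS elements.
// After scavenging a card we set its byte if some element still points
// into a younger generation, so that a later minor GC only needs to
// rescan the marked cards (see scavenge_mut_arr_ptrs_marked below).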
99 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
100 {
101 W_ m;
102 rtsBool any_failed;
103 StgPtr p, q;
104
105 any_failed = rtsFalse;
106 p = (StgPtr)&a->payload[0];
107 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
108 {
109 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
110 for (; p < q; p++) {
111 evacuate((StgClosure**)p);
112 }
113 if (gct->failed_to_evac) {
114 any_failed = rtsTrue;
115 *mutArrPtrsCard(a,m) = 1;
116 gct->failed_to_evac = rtsFalse;
117 } else {
118 *mutArrPtrsCard(a,m) = 0;
119 }
120 }
121
122 q = (StgPtr)&a->payload[a->ptrs];
123 if (p < q) {
124 for (; p < q; p++) {
125 evacuate((StgClosure**)p);
126 }
127 if (gct->failed_to_evac) {
128 any_failed = rtsTrue;
129 *mutArrPtrsCard(a,m) = 1;
130 gct->failed_to_evac = rtsFalse;
131 } else {
132 *mutArrPtrsCard(a,m) = 0;
133 }
134 }
135
136 gct->failed_to_evac = any_failed;
137 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
138 }
139
140 // scavenge only the marked areas of a MUT_ARR_PTRS
141 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
142 {
143 W_ m;
144 StgPtr p, q;
145 rtsBool any_failed;
146
147 any_failed = rtsFalse;
148 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
149 {
150 if (*mutArrPtrsCard(a,m) != 0) {
151 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
152 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
153 (StgPtr)&a->payload[a->ptrs]);
154 for (; p < q; p++) {
155 evacuate((StgClosure**)p);
156 }
157 if (gct->failed_to_evac) {
158 any_failed = rtsTrue;
159 gct->failed_to_evac = rtsFalse;
160 } else {
161 *mutArrPtrsCard(a,m) = 0;
162 }
163 }
164 }
165
166 gct->failed_to_evac = any_failed;
167 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
168 }
169
170 /* -----------------------------------------------------------------------------
171 Blocks of function args occur on the stack (at the top) and
172 in PAPs.
173 -------------------------------------------------------------------------- */
174
175 STATIC_INLINE StgPtr
176 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
177 {
178 StgPtr p;
179 StgWord bitmap;
180 nat size;
181
182 p = (StgPtr)args;
183 switch (fun_info->f.fun_type) {
184 case ARG_GEN:
185 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
186 size = BITMAP_SIZE(fun_info->f.b.bitmap);
187 goto small_bitmap;
188 case ARG_GEN_BIG:
189 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
190 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
191 p += size;
192 break;
193 default:
194 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
195 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
196 small_bitmap:
197 while (size > 0) {
198 if ((bitmap & 1) == 0) {
199 evacuate((StgClosure **)p);
200 }
201 p++;
202 bitmap = bitmap >> 1;
203 size--;
204 }
205 break;
206 }
207 return p;
208 }
209
210 STATIC_INLINE GNUC_ATTR_HOT StgPtr
211 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
212 {
213 StgPtr p;
214 StgWord bitmap;
215 StgFunInfoTable *fun_info;
216
217 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
218 ASSERT(fun_info->i.type != PAP);
219 p = (StgPtr)payload;
220
221 switch (fun_info->f.fun_type) {
222 case ARG_GEN:
223 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
224 goto small_bitmap;
225 case ARG_GEN_BIG:
226 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
227 p += size;
228 break;
229 case ARG_BCO:
230 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
231 p += size;
232 break;
233 default:
234 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
235 small_bitmap:
236 while (size > 0) {
237 if ((bitmap & 1) == 0) {
238 evacuate((StgClosure **)p);
239 }
240 p++;
241 bitmap = bitmap >> 1;
242 size--;
243 }
244 break;
245 }
246 return p;
247 }
248
249 STATIC_INLINE GNUC_ATTR_HOT StgPtr
250 scavenge_PAP (StgPAP *pap)
251 {
252 evacuate(&pap->fun);
253 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
254 }
255
256 STATIC_INLINE StgPtr
257 scavenge_AP (StgAP *ap)
258 {
259 evacuate(&ap->fun);
260 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
261 }
262
263 /* -----------------------------------------------------------------------------
264 Scavenge SRTs
265 -------------------------------------------------------------------------- */
266
267 /* Similar to scavenge_large_bitmap(), but we don't write back the
268 * pointers we get back from evacuate().
269 */
270 static void
271 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
272 {
273 nat i, b, size;
274 StgWord bitmap;
275 StgClosure **p;
276
277 b = 0;
278 bitmap = large_srt->l.bitmap[b];
279 size = (nat)large_srt->l.size;
280 p = (StgClosure **)large_srt->srt;
281 for (i = 0; i < size; ) {
282 if ((bitmap & 1) != 0) {
283 evacuate(p);
284 }
285 i++;
286 p++;
287 if (i % BITS_IN(W_) == 0) {
288 b++;
289 bitmap = large_srt->l.bitmap[b];
290 } else {
291 bitmap = bitmap >> 1;
292 }
293 }
294 }
295
296 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
297 * srt field in the info table. That's ok, because we'll
298 * never dereference it.
299 */
300 STATIC_INLINE GNUC_ATTR_HOT void
301 scavenge_srt (StgClosure **srt, nat srt_bitmap)
302 {
303 nat bitmap;
304 StgClosure **p;
305
306 bitmap = srt_bitmap;
307 p = srt;
308
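// An srt_bitmap of all ones is a sentinel meaning the SRT is too large
// for a small bitmap; in that case the srt field actually points to an
// StgLargeSRT, handled by scavenge_large_srt_bitmap() above.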
309 if (bitmap == (StgHalfWord)(-1)) {
310 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
311 return;
312 }
313
314 while (bitmap != 0) {
315 if ((bitmap & 1) != 0) {
316 #if defined(COMPILING_WINDOWS_DLL)
317 // Special-case to handle references to closures hiding out in DLLs, since
318 // double indirections are required to get at those. The code generator knows
319 // which is which when generating the SRT, so it stores the (indirect)
320 // reference to the DLL closure in the table by first adding one to it.
321 // We check for this here, and undo the addition before evacuating it.
322 //
323 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
324 // closure that's fixed at link-time, and no extra magic is required.
325 if ( (W_)(*srt) & 0x1 ) {
326 evacuate( (StgClosure**) ((W_) (*srt) & ~0x1));
327 } else {
328 evacuate(p);
329 }
330 #else
331 evacuate(p);
332 #endif
333 }
334 p++;
335 bitmap = bitmap >> 1;
336 }
337 }
338
339
340 STATIC_INLINE GNUC_ATTR_HOT void
341 scavenge_thunk_srt(const StgInfoTable *info)
342 {
343 StgThunkInfoTable *thunk_info;
344
345 if (!major_gc) return;
346
347 thunk_info = itbl_to_thunk_itbl(info);
348 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
349 }
350
351 STATIC_INLINE GNUC_ATTR_HOT void
352 scavenge_fun_srt(const StgInfoTable *info)
353 {
354 StgFunInfoTable *fun_info;
355
356 if (!major_gc) return;
357
358 fun_info = itbl_to_fun_itbl(info);
359 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
360 }
361
362 /* -----------------------------------------------------------------------------
363 Scavenge a block from the given scan pointer up to bd->free.
364
365 evac_gen_no is set by the caller to be either zero (for a step in a
366 generation < N) or G where G is the generation of the step being
367 scavenged.
368
369 We sometimes temporarily change evac_gen_no back to zero if we're
370 scavenging a mutable object where eager promotion isn't such a good
371 idea.
372 -------------------------------------------------------------------------- */
373
374 static GNUC_ATTR_HOT void
375 scavenge_block (bdescr *bd)
376 {
377 StgPtr p, q;
378 StgInfoTable *info;
379 rtsBool saved_eager_promotion;
380 gen_workspace *ws;
381
382 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
383 bd->start, bd->gen_no, bd->u.scan);
384
385 gct->scan_bd = bd;
386 gct->evac_gen_no = bd->gen_no;
387 saved_eager_promotion = gct->eager_promotion;
388 gct->failed_to_evac = rtsFalse;
389
390 ws = &gct->gens[bd->gen->no];
391
392 p = bd->u.scan;
393
394 // we might be evacuating into the very object that we're
395 // scavenging, so we have to check the real bd->free pointer each
396 // time around the loop.
397 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
398
399 ASSERT(bd->link == NULL);
400 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
401 info = get_itbl((StgClosure *)p);
402
403 ASSERT(gct->thunk_selector_depth == 0);
404
405 q = p;
406 switch (info->type) {
407
408 case MVAR_CLEAN:
409 case MVAR_DIRTY:
410 {
411 StgMVar *mvar = ((StgMVar *)p);
412 gct->eager_promotion = rtsFalse;
413 evacuate((StgClosure **)&mvar->head);
414 evacuate((StgClosure **)&mvar->tail);
415 evacuate((StgClosure **)&mvar->value);
416 gct->eager_promotion = saved_eager_promotion;
417
418 if (gct->failed_to_evac) {
419 mvar->header.info = &stg_MVAR_DIRTY_info;
420 } else {
421 mvar->header.info = &stg_MVAR_CLEAN_info;
422 }
423 p += sizeofW(StgMVar);
424 break;
425 }
426
427 case TVAR:
428 {
429 StgTVar *tvar = ((StgTVar *)p);
430 gct->eager_promotion = rtsFalse;
431 evacuate((StgClosure **)&tvar->current_value);
432 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
433 gct->eager_promotion = saved_eager_promotion;
434
435 if (gct->failed_to_evac) {
436 tvar->header.info = &stg_TVAR_DIRTY_info;
437 } else {
438 tvar->header.info = &stg_TVAR_CLEAN_info;
439 }
440 p += sizeofW(StgTVar);
441 break;
442 }
443
444 case FUN_2_0:
445 scavenge_fun_srt(info);
446 evacuate(&((StgClosure *)p)->payload[1]);
447 evacuate(&((StgClosure *)p)->payload[0]);
448 p += sizeofW(StgHeader) + 2;
449 break;
450
451 case THUNK_2_0:
452 scavenge_thunk_srt(info);
453 evacuate(&((StgThunk *)p)->payload[1]);
454 evacuate(&((StgThunk *)p)->payload[0]);
455 p += sizeofW(StgThunk) + 2;
456 break;
457
458 case CONSTR_2_0:
459 evacuate(&((StgClosure *)p)->payload[1]);
460 evacuate(&((StgClosure *)p)->payload[0]);
461 p += sizeofW(StgHeader) + 2;
462 break;
463
464 case THUNK_1_0:
465 scavenge_thunk_srt(info);
466 evacuate(&((StgThunk *)p)->payload[0]);
467 p += sizeofW(StgThunk) + 1;
468 break;
469
470 case FUN_1_0:
471 scavenge_fun_srt(info);
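// fall through to CONSTR_1_0: the payload is scavenged identically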
472 case CONSTR_1_0:
473 evacuate(&((StgClosure *)p)->payload[0]);
474 p += sizeofW(StgHeader) + 1;
475 break;
476
477 case THUNK_0_1:
478 scavenge_thunk_srt(info);
479 p += sizeofW(StgThunk) + 1;
480 break;
481
482 case FUN_0_1:
483 scavenge_fun_srt(info);
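// fall through to CONSTR_0_1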
484 case CONSTR_0_1:
485 p += sizeofW(StgHeader) + 1;
486 break;
487
488 case THUNK_0_2:
489 scavenge_thunk_srt(info);
490 p += sizeofW(StgThunk) + 2;
491 break;
492
493 case FUN_0_2:
494 scavenge_fun_srt(info);
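// fall through to CONSTR_0_2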
495 case CONSTR_0_2:
496 p += sizeofW(StgHeader) + 2;
497 break;
498
499 case THUNK_1_1:
500 scavenge_thunk_srt(info);
501 evacuate(&((StgThunk *)p)->payload[0]);
502 p += sizeofW(StgThunk) + 2;
503 break;
504
505 case FUN_1_1:
506 scavenge_fun_srt(info);
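// fall through to CONSTR_1_1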
507 case CONSTR_1_1:
508 evacuate(&((StgClosure *)p)->payload[0]);
509 p += sizeofW(StgHeader) + 2;
510 break;
511
512 case FUN:
513 scavenge_fun_srt(info);
514 goto gen_obj;
515
516 case THUNK:
517 {
518 StgPtr end;
519
520 scavenge_thunk_srt(info);
521 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
522 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
523 evacuate((StgClosure **)p);
524 }
525 p += info->layout.payload.nptrs;
526 break;
527 }
528
529 gen_obj:
530 case CONSTR:
531 case WEAK:
532 case PRIM:
533 {
534 StgPtr end;
535
536 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
537 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
538 evacuate((StgClosure **)p);
539 }
540 p += info->layout.payload.nptrs;
541 break;
542 }
543
544 case BCO: {
545 StgBCO *bco = (StgBCO *)p;
546 evacuate((StgClosure **)&bco->instrs);
547 evacuate((StgClosure **)&bco->literals);
548 evacuate((StgClosure **)&bco->ptrs);
549 p += bco_sizeW(bco);
550 break;
551 }
552
553 case IND_PERM:
554 case BLACKHOLE:
555 evacuate(&((StgInd *)p)->indirectee);
556 p += sizeofW(StgInd);
557 break;
558
559 case MUT_VAR_CLEAN:
560 case MUT_VAR_DIRTY:
561 gct->eager_promotion = rtsFalse;
562 evacuate(&((StgMutVar *)p)->var);
563 gct->eager_promotion = saved_eager_promotion;
564
565 if (gct->failed_to_evac) {
566 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
567 } else {
568 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
569 }
570 p += sizeofW(StgMutVar);
571 break;
572
573 case BLOCKING_QUEUE:
574 {
575 StgBlockingQueue *bq = (StgBlockingQueue *)p;
576
577 gct->eager_promotion = rtsFalse;
578 evacuate(&bq->bh);
579 evacuate((StgClosure**)&bq->owner);
580 evacuate((StgClosure**)&bq->queue);
581 evacuate((StgClosure**)&bq->link);
582 gct->eager_promotion = saved_eager_promotion;
583
584 if (gct->failed_to_evac) {
585 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
586 } else {
587 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
588 }
589 p += sizeofW(StgBlockingQueue);
590 break;
591 }
592
593 case THUNK_SELECTOR:
594 {
595 StgSelector *s = (StgSelector *)p;
596 evacuate(&s->selectee);
597 p += THUNK_SELECTOR_sizeW();
598 break;
599 }
600
601 // A chunk of stack saved in a heap object
602 case AP_STACK:
603 {
604 StgAP_STACK *ap = (StgAP_STACK *)p;
605
606 evacuate(&ap->fun);
607 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
608 p = (StgPtr)ap->payload + ap->size;
609 break;
610 }
611
612 case PAP:
613 p = scavenge_PAP((StgPAP *)p);
614 break;
615
616 case AP:
617 p = scavenge_AP((StgAP *)p);
618 break;
619
620 case ARR_WORDS:
621 // nothing to follow
622 p += arr_words_sizeW((StgArrWords *)p);
623 break;
624
625 case MUT_ARR_PTRS_CLEAN:
626 case MUT_ARR_PTRS_DIRTY:
627 {
628 // We don't eagerly promote objects pointed to by a mutable
629 // array, but if we find the array only points to objects in
630 // the same or an older generation, we mark it "clean" and
631 // avoid traversing it during minor GCs.
632 gct->eager_promotion = rtsFalse;
633
634 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
635
636 if (gct->failed_to_evac) {
637 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
638 } else {
639 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
640 }
641
642 gct->eager_promotion = saved_eager_promotion;
643 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
644 break;
645 }
646
647 case MUT_ARR_PTRS_FROZEN:
648 case MUT_ARR_PTRS_FROZEN0:
649 // follow everything
650 {
651 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
652
653 // If we're going to put this object on the mutable list, then
654 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
655 if (gct->failed_to_evac) {
656 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
657 } else {
658 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
659 }
660 break;
661 }
662
663 case TSO:
664 {
665 scavengeTSO((StgTSO *)p);
666 p += sizeofW(StgTSO);
667 break;
668 }
669
670 case STACK:
671 {
672 StgStack *stack = (StgStack*)p;
673
674 gct->eager_promotion = rtsFalse;
675
676 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
677 stack->dirty = gct->failed_to_evac;
678 p += stack_sizeW(stack);
679
680 gct->eager_promotion = saved_eager_promotion;
681 break;
682 }
683
684 case MUT_PRIM:
685 {
686 StgPtr end;
687
688 gct->eager_promotion = rtsFalse;
689
690 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
691 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
692 evacuate((StgClosure **)p);
693 }
694 p += info->layout.payload.nptrs;
695
696 gct->eager_promotion = saved_eager_promotion;
697 gct->failed_to_evac = rtsTrue; // mutable
698 break;
699 }
700
701 case TREC_CHUNK:
702 {
703 StgWord i;
704 StgTRecChunk *tc = ((StgTRecChunk *) p);
705 TRecEntry *e = &(tc -> entries[0]);
706 gct->eager_promotion = rtsFalse;
707 evacuate((StgClosure **)&tc->prev_chunk);
708 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
709 evacuate((StgClosure **)&e->tvar);
710 evacuate((StgClosure **)&e->expected_value);
711 evacuate((StgClosure **)&e->new_value);
712 }
713 gct->eager_promotion = saved_eager_promotion;
714 gct->failed_to_evac = rtsTrue; // mutable
715 p += sizeofW(StgTRecChunk);
716 break;
717 }
718
719 default:
720 barf("scavenge: unimplemented/strange closure type %d @ %p",
721 info->type, p);
722 }
723
724 /*
725 * We need to record the current object on the mutable list if
726 * (a) It is actually mutable, or
727 * (b) It contains pointers to a younger generation.
728 * Case (b) arises if we didn't manage to promote everything that
729 * the current object points to into the current generation.
730 */
731 if (gct->failed_to_evac) {
732 gct->failed_to_evac = rtsFalse;
733 if (bd->gen_no > 0) {
734 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
735 }
736 }
737 }
738
739 if (p > bd->free) {
740 gct->copied += ws->todo_free - bd->free;
741 bd->free = p;
742 }
743
744 debugTrace(DEBUG_gc, " scavenged %ld bytes",
745 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
746
747 // update stats: this is a block that has been scavenged
748 gct->scanned += bd->free - bd->u.scan;
749 bd->u.scan = bd->free;
750
751 if (bd != ws->todo_bd) {
752 // we're not going to evac any more objects into
753 // this block, so push it now.
754 push_scanned_block(bd, ws);
755 }
756
757 gct->scan_bd = NULL;
758 }
759 /* -----------------------------------------------------------------------------
760 Scavenge everything on the mark stack.
761
762 This is slightly different from scavenge():
763 - we don't walk linearly through the objects, so the scavenger
764 doesn't need to advance the pointer on to the next object.
765 -------------------------------------------------------------------------- */
766
767 static void
768 scavenge_mark_stack(void)
769 {
770 StgPtr p, q;
771 StgInfoTable *info;
772 rtsBool saved_eager_promotion;
773
774 gct->evac_gen_no = oldest_gen->no;
775 saved_eager_promotion = gct->eager_promotion;
776
777 while ((p = pop_mark_stack())) {
778
779 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
780 info = get_itbl((StgClosure *)p);
781
782 q = p;
783 switch (info->type) {
784
785 case MVAR_CLEAN:
786 case MVAR_DIRTY:
787 {
788 StgMVar *mvar = ((StgMVar *)p);
789 gct->eager_promotion = rtsFalse;
790 evacuate((StgClosure **)&mvar->head);
791 evacuate((StgClosure **)&mvar->tail);
792 evacuate((StgClosure **)&mvar->value);
793 gct->eager_promotion = saved_eager_promotion;
794
795 if (gct->failed_to_evac) {
796 mvar->header.info = &stg_MVAR_DIRTY_info;
797 } else {
798 mvar->header.info = &stg_MVAR_CLEAN_info;
799 }
800 break;
801 }
802
803 case TVAR:
804 {
805 StgTVar *tvar = ((StgTVar *)p);
806 gct->eager_promotion = rtsFalse;
807 evacuate((StgClosure **)&tvar->current_value);
808 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
809 gct->eager_promotion = saved_eager_promotion;
810
811 if (gct->failed_to_evac) {
812 tvar->header.info = &stg_TVAR_DIRTY_info;
813 } else {
814 tvar->header.info = &stg_TVAR_CLEAN_info;
815 }
816 break;
817 }
818
819 case FUN_2_0:
820 scavenge_fun_srt(info);
821 evacuate(&((StgClosure *)p)->payload[1]);
822 evacuate(&((StgClosure *)p)->payload[0]);
823 break;
824
825 case THUNK_2_0:
826 scavenge_thunk_srt(info);
827 evacuate(&((StgThunk *)p)->payload[1]);
828 evacuate(&((StgThunk *)p)->payload[0]);
829 break;
830
831 case CONSTR_2_0:
832 evacuate(&((StgClosure *)p)->payload[1]);
833 evacuate(&((StgClosure *)p)->payload[0]);
834 break;
835
836 case FUN_1_0:
837 case FUN_1_1:
838 scavenge_fun_srt(info);
839 evacuate(&((StgClosure *)p)->payload[0]);
840 break;
841
842 case THUNK_1_0:
843 case THUNK_1_1:
844 scavenge_thunk_srt(info);
845 evacuate(&((StgThunk *)p)->payload[0]);
846 break;
847
848 case CONSTR_1_0:
849 case CONSTR_1_1:
850 evacuate(&((StgClosure *)p)->payload[0]);
851 break;
852
853 case FUN_0_1:
854 case FUN_0_2:
855 scavenge_fun_srt(info);
856 break;
857
858 case THUNK_0_1:
859 case THUNK_0_2:
860 scavenge_thunk_srt(info);
861 break;
862
863 case CONSTR_0_1:
864 case CONSTR_0_2:
865 break;
866
867 case FUN:
868 scavenge_fun_srt(info);
869 goto gen_obj;
870
871 case THUNK:
872 {
873 StgPtr end;
874
875 scavenge_thunk_srt(info);
876 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
877 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
878 evacuate((StgClosure **)p);
879 }
880 break;
881 }
882
883 gen_obj:
884 case CONSTR:
885 case WEAK:
886 case PRIM:
887 {
888 StgPtr end;
889
890 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
891 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
892 evacuate((StgClosure **)p);
893 }
894 break;
895 }
896
897 case BCO: {
898 StgBCO *bco = (StgBCO *)p;
899 evacuate((StgClosure **)&bco->instrs);
900 evacuate((StgClosure **)&bco->literals);
901 evacuate((StgClosure **)&bco->ptrs);
902 break;
903 }
904
905 case IND_PERM:
906 // don't need to do anything here: the only possible case
907 // is that we're in a 1-space compacting collector, with
908 // no "old" generation.
909 break;
910
911 case IND:
912 case BLACKHOLE:
913 evacuate(&((StgInd *)p)->indirectee);
914 break;
915
916 case MUT_VAR_CLEAN:
917 case MUT_VAR_DIRTY: {
918 gct->eager_promotion = rtsFalse;
919 evacuate(&((StgMutVar *)p)->var);
920 gct->eager_promotion = saved_eager_promotion;
921
922 if (gct->failed_to_evac) {
923 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
924 } else {
925 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
926 }
927 break;
928 }
929
930 case BLOCKING_QUEUE:
931 {
932 StgBlockingQueue *bq = (StgBlockingQueue *)p;
933
934 gct->eager_promotion = rtsFalse;
935 evacuate(&bq->bh);
936 evacuate((StgClosure**)&bq->owner);
937 evacuate((StgClosure**)&bq->queue);
938 evacuate((StgClosure**)&bq->link);
939 gct->eager_promotion = saved_eager_promotion;
940
941 if (gct->failed_to_evac) {
942 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
943 } else {
944 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
945 }
946 break;
947 }
948
949 case ARR_WORDS:
950 break;
951
952 case THUNK_SELECTOR:
953 {
954 StgSelector *s = (StgSelector *)p;
955 evacuate(&s->selectee);
956 break;
957 }
958
959 // A chunk of stack saved in a heap object
960 case AP_STACK:
961 {
962 StgAP_STACK *ap = (StgAP_STACK *)p;
963
964 evacuate(&ap->fun);
965 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
966 break;
967 }
968
969 case PAP:
970 scavenge_PAP((StgPAP *)p);
971 break;
972
973 case AP:
974 scavenge_AP((StgAP *)p);
975 break;
976
977 case MUT_ARR_PTRS_CLEAN:
978 case MUT_ARR_PTRS_DIRTY:
979 // follow everything
980 {
981 // We don't eagerly promote objects pointed to by a mutable
982 // array, but if we find the array only points to objects in
983 // the same or an older generation, we mark it "clean" and
984 // avoid traversing it during minor GCs.
985 gct->eager_promotion = rtsFalse;
986
987 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
988
989 if (gct->failed_to_evac) {
990 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
991 } else {
992 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
993 }
994
995 gct->eager_promotion = saved_eager_promotion;
996 gct->failed_to_evac = rtsTrue; // mutable anyhow.
997 break;
998 }
999
1000 case MUT_ARR_PTRS_FROZEN:
1001 case MUT_ARR_PTRS_FROZEN0:
1002 // follow everything
1003 {
1004 StgPtr q = p;
1005
1006 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1007
1008 // If we're going to put this object on the mutable list, then
1009 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1010 if (gct->failed_to_evac) {
1011 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1012 } else {
1013 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1014 }
1015 break;
1016 }
1017
1018 case TSO:
1019 {
1020 scavengeTSO((StgTSO*)p);
1021 break;
1022 }
1023
1024 case STACK:
1025 {
1026 StgStack *stack = (StgStack*)p;
1027
1028 gct->eager_promotion = rtsFalse;
1029
1030 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1031 stack->dirty = gct->failed_to_evac;
1032
1033 gct->eager_promotion = saved_eager_promotion;
1034 break;
1035 }
1036
1037 case MUT_PRIM:
1038 {
1039 StgPtr end;
1040
1041 gct->eager_promotion = rtsFalse;
1042
1043 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1044 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1045 evacuate((StgClosure **)p);
1046 }
1047
1048 gct->eager_promotion = saved_eager_promotion;
1049 gct->failed_to_evac = rtsTrue; // mutable
1050 break;
1051 }
1052
1053 case TREC_CHUNK:
1054 {
1055 StgWord i;
1056 StgTRecChunk *tc = ((StgTRecChunk *) p);
1057 TRecEntry *e = &(tc -> entries[0]);
1058 gct->eager_promotion = rtsFalse;
1059 evacuate((StgClosure **)&tc->prev_chunk);
1060 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1061 evacuate((StgClosure **)&e->tvar);
1062 evacuate((StgClosure **)&e->expected_value);
1063 evacuate((StgClosure **)&e->new_value);
1064 }
1065 gct->eager_promotion = saved_eager_promotion;
1066 gct->failed_to_evac = rtsTrue; // mutable
1067 break;
1068 }
1069
1070 default:
1071 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1072 info->type, p);
1073 }
1074
1075 if (gct->failed_to_evac) {
1076 gct->failed_to_evac = rtsFalse;
1077 if (gct->evac_gen_no) {
1078 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1079 }
1080 }
1081 } // while (p = pop_mark_stack())
1082 }
1083
1084 /* -----------------------------------------------------------------------------
1085 Scavenge one object.
1086
1087 This is used for objects that are temporarily marked as mutable
1088 because they contain old-to-new generation pointers. Only certain
1089 objects can have this property.
1090 -------------------------------------------------------------------------- */
1091
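// Returns rtsTrue if the object still refers to a younger generation
// (i.e. something could not be evacuated far enough), in which case the
// caller must keep the object on the mutable list.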
1092 static rtsBool
1093 scavenge_one(StgPtr p)
1094 {
1095 const StgInfoTable *info;
1096 rtsBool no_luck;
1097 rtsBool saved_eager_promotion;
1098
1099 saved_eager_promotion = gct->eager_promotion;
1100
1101 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1102 info = get_itbl((StgClosure *)p);
1103
1104 switch (info->type) {
1105
1106 case MVAR_CLEAN:
1107 case MVAR_DIRTY:
1108 {
1109 StgMVar *mvar = ((StgMVar *)p);
1110 gct->eager_promotion = rtsFalse;
1111 evacuate((StgClosure **)&mvar->head);
1112 evacuate((StgClosure **)&mvar->tail);
1113 evacuate((StgClosure **)&mvar->value);
1114 gct->eager_promotion = saved_eager_promotion;
1115
1116 if (gct->failed_to_evac) {
1117 mvar->header.info = &stg_MVAR_DIRTY_info;
1118 } else {
1119 mvar->header.info = &stg_MVAR_CLEAN_info;
1120 }
1121 break;
1122 }
1123
1124 case TVAR:
1125 {
1126 StgTVar *tvar = ((StgTVar *)p);
1127 gct->eager_promotion = rtsFalse;
1128 evacuate((StgClosure **)&tvar->current_value);
1129 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1130 gct->eager_promotion = saved_eager_promotion;
1131
1132 if (gct->failed_to_evac) {
1133 tvar->header.info = &stg_TVAR_DIRTY_info;
1134 } else {
1135 tvar->header.info = &stg_TVAR_CLEAN_info;
1136 }
1137 break;
1138 }
1139
1140 case THUNK:
1141 case THUNK_1_0:
1142 case THUNK_0_1:
1143 case THUNK_1_1:
1144 case THUNK_0_2:
1145 case THUNK_2_0:
1146 {
1147 StgPtr q, end;
1148
1149 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1150 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1151 evacuate((StgClosure **)q);
1152 }
1153 break;
1154 }
1155
1156 case FUN:
1157 case FUN_1_0: // hardly worth specialising these guys
1158 case FUN_0_1:
1159 case FUN_1_1:
1160 case FUN_0_2:
1161 case FUN_2_0:
1162 case CONSTR:
1163 case CONSTR_1_0:
1164 case CONSTR_0_1:
1165 case CONSTR_1_1:
1166 case CONSTR_0_2:
1167 case CONSTR_2_0:
1168 case WEAK:
1169 case PRIM:
1170 case IND_PERM:
1171 {
1172 StgPtr q, end;
1173
1174 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1175 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1176 evacuate((StgClosure **)q);
1177 }
1178 break;
1179 }
1180
1181 case MUT_VAR_CLEAN:
1182 case MUT_VAR_DIRTY: {
1183 StgPtr q = p;
1184
1185 gct->eager_promotion = rtsFalse;
1186 evacuate(&((StgMutVar *)p)->var);
1187 gct->eager_promotion = saved_eager_promotion;
1188
1189 if (gct->failed_to_evac) {
1190 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1191 } else {
1192 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1193 }
1194 break;
1195 }
1196
1197 case BLOCKING_QUEUE:
1198 {
1199 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1200
1201 gct->eager_promotion = rtsFalse;
1202 evacuate(&bq->bh);
1203 evacuate((StgClosure**)&bq->owner);
1204 evacuate((StgClosure**)&bq->queue);
1205 evacuate((StgClosure**)&bq->link);
1206 gct->eager_promotion = saved_eager_promotion;
1207
1208 if (gct->failed_to_evac) {
1209 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1210 } else {
1211 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1212 }
1213 break;
1214 }
1215
1216 case THUNK_SELECTOR:
1217 {
1218 StgSelector *s = (StgSelector *)p;
1219 evacuate(&s->selectee);
1220 break;
1221 }
1222
1223 case AP_STACK:
1224 {
1225 StgAP_STACK *ap = (StgAP_STACK *)p;
1226
1227 evacuate(&ap->fun);
1228 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1229 p = (StgPtr)ap->payload + ap->size;
1230 break;
1231 }
1232
1233 case PAP:
1234 p = scavenge_PAP((StgPAP *)p);
1235 break;
1236
1237 case AP:
1238 p = scavenge_AP((StgAP *)p);
1239 break;
1240
1241 case ARR_WORDS:
1242 // nothing to follow
1243 break;
1244
1245 case MUT_ARR_PTRS_CLEAN:
1246 case MUT_ARR_PTRS_DIRTY:
1247 {
1248 // We don't eagerly promote objects pointed to by a mutable
1249 // array, but if we find the array only points to objects in
1250 // the same or an older generation, we mark it "clean" and
1251 // avoid traversing it during minor GCs.
1252 gct->eager_promotion = rtsFalse;
1253
1254 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1255
1256 if (gct->failed_to_evac) {
1257 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1258 } else {
1259 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1260 }
1261
1262 gct->eager_promotion = saved_eager_promotion;
1263 gct->failed_to_evac = rtsTrue;
1264 break;
1265 }
1266
1267 case MUT_ARR_PTRS_FROZEN:
1268 case MUT_ARR_PTRS_FROZEN0:
1269 {
1270 // follow everything
1271 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1272
1273 // If we're going to put this object on the mutable list, then
1274 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1275 if (gct->failed_to_evac) {
1276 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1277 } else {
1278 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1279 }
1280 break;
1281 }
1282
1283 case TSO:
1284 {
1285 scavengeTSO((StgTSO*)p);
1286 break;
1287 }
1288
1289 case STACK:
1290 {
1291 StgStack *stack = (StgStack*)p;
1292
1293 gct->eager_promotion = rtsFalse;
1294
1295 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1296 stack->dirty = gct->failed_to_evac;
1297
1298 gct->eager_promotion = saved_eager_promotion;
1299 break;
1300 }
1301
1302 case MUT_PRIM:
1303 {
1304 StgPtr end;
1305
1306 gct->eager_promotion = rtsFalse;
1307
1308 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1309 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1310 evacuate((StgClosure **)p);
1311 }
1312
1313 gct->eager_promotion = saved_eager_promotion;
1314 gct->failed_to_evac = rtsTrue; // mutable
1315 break;
1316
1317 }
1318
1319 case TREC_CHUNK:
1320 {
1321 StgWord i;
1322 StgTRecChunk *tc = ((StgTRecChunk *) p);
1323 TRecEntry *e = &(tc -> entries[0]);
1324 gct->eager_promotion = rtsFalse;
1325 evacuate((StgClosure **)&tc->prev_chunk);
1326 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1327 evacuate((StgClosure **)&e->tvar);
1328 evacuate((StgClosure **)&e->expected_value);
1329 evacuate((StgClosure **)&e->new_value);
1330 }
1331 gct->eager_promotion = saved_eager_promotion;
1332 gct->failed_to_evac = rtsTrue; // mutable
1333 break;
1334 }
1335
1336 case IND:
1337 // IND can happen, for example, when the interpreter allocates
1338 // a gigantic AP closure (more than one block), which ends up
1339 // on the large-object list and then gets updated. See #3424.
1340 case BLACKHOLE:
1341 case IND_STATIC:
1342 evacuate(&((StgInd *)p)->indirectee);
1343
1344 #if 0 && defined(DEBUG)
1345 if (RtsFlags.DebugFlags.gc)
1346 /* Debugging code to print out the size of the thing we just
1347 * promoted
1348 */
1349 {
1350 StgPtr start = gen->scan;
1351 bdescr *start_bd = gen->scan_bd;
1352 nat size = 0;
1353 scavenge(&gen);
1354 if (start_bd != gen->scan_bd) {
1355 size += (P_)BLOCK_ROUND_UP(start) - start;
1356 start_bd = start_bd->link;
1357 while (start_bd != gen->scan_bd) {
1358 size += BLOCK_SIZE_W;
1359 start_bd = start_bd->link;
1360 }
1361 size += gen->scan -
1362 (P_)BLOCK_ROUND_DOWN(gen->scan);
1363 } else {
1364 size = gen->scan - start;
1365 }
1366 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1367 }
1368 #endif
1369 break;
1370
1371 default:
1372 barf("scavenge_one: strange object %d", (int)(info->type));
1373 }
1374
1375 no_luck = gct->failed_to_evac;
1376 gct->failed_to_evac = rtsFalse;
1377 return (no_luck);
1378 }
1379
1380 /* -----------------------------------------------------------------------------
1381 Scavenging mutable lists.
1382
1383 We treat the mutable list of each generation > N (i.e. all the
1384 generations older than the one being collected) as roots. We also
1385 remove non-mutable objects from the mutable list at this point.
1386 -------------------------------------------------------------------------- */
1387
1388 void
1389 scavenge_mutable_list(bdescr *bd, generation *gen)
1390 {
1391 StgPtr p, q;
1392 nat gen_no;
1393
1394 gen_no = gen->no;
1395 gct->evac_gen_no = gen_no;
1396 for (; bd != NULL; bd = bd->link) {
1397 for (q = bd->start; q < bd->free; q++) {
1398 p = (StgPtr)*q;
1399 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1400
1401 #ifdef DEBUG
1402 switch (get_itbl((StgClosure *)p)->type) {
1403 case MUT_VAR_CLEAN:
1404 // can happen due to concurrent writeMutVars
1405 case MUT_VAR_DIRTY:
1406 mutlist_MUTVARS++; break;
1407 case MUT_ARR_PTRS_CLEAN:
1408 case MUT_ARR_PTRS_DIRTY:
1409 case MUT_ARR_PTRS_FROZEN:
1410 case MUT_ARR_PTRS_FROZEN0:
1411 mutlist_MUTARRS++; break;
1412 case MVAR_CLEAN:
1413 barf("MVAR_CLEAN on mutable list");
1414 case MVAR_DIRTY:
1415 mutlist_MVARS++; break;
1416 case TVAR:
1417 mutlist_TVAR++; break;
1418 case TREC_CHUNK:
1419 mutlist_TREC_CHUNK++; break;
1420 case MUT_PRIM:
1421 if (((StgClosure*)p)->header.info == &stg_TVAR_WATCH_QUEUE_info)
1422 mutlist_TVAR_WATCH_QUEUE++;
1423 else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
1424 mutlist_TREC_HEADER++;
1425 else if (((StgClosure*)p)->header.info == &stg_ATOMIC_INVARIANT_info)
1426 mutlist_ATOMIC_INVARIANT++;
1427 else if (((StgClosure*)p)->header.info == &stg_INVARIANT_CHECK_QUEUE_info)
1428 mutlist_INVARIANT_CHECK_QUEUE++;
1429 else
1430 mutlist_OTHERS++;
1431 break;
1432 default:
1433 mutlist_OTHERS++; break;
1434 }
1435 #endif
1436
1437 // Check whether this object is "clean", that is, it
1438 // definitely doesn't point into a young generation.
1439 // Clean objects don't need to be scavenged. Some clean
1440 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1441 // list at all; others, such as MUT_ARR_PTRS, are
1442 // always on the mutable list.
1443 //
1444 switch (get_itbl((StgClosure *)p)->type) {
1445 case MUT_ARR_PTRS_CLEAN:
1446 recordMutableGen_GC((StgClosure *)p,gen_no);
1447 continue;
1448 case MUT_ARR_PTRS_DIRTY:
1449 {
1450 rtsBool saved_eager_promotion;
1451 saved_eager_promotion = gct->eager_promotion;
1452 gct->eager_promotion = rtsFalse;
1453
1454 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1455
1456 if (gct->failed_to_evac) {
1457 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1458 } else {
1459 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1460 }
1461
1462 gct->eager_promotion = saved_eager_promotion;
1463 gct->failed_to_evac = rtsFalse;
1464 recordMutableGen_GC((StgClosure *)p,gen_no);
1465 continue;
1466 }
1467 default:
1468 ;
1469 }
1470
1471 if (scavenge_one(p)) {
1472 // didn't manage to promote everything, so put the
1473 // object back on the list.
1474 recordMutableGen_GC((StgClosure *)p,gen_no);
1475 }
1476 }
1477 }
1478 }
1479
1480 void
1481 scavenge_capability_mut_lists (Capability *cap)
1482 {
1483 nat g;
1484
1485 /* Mutable lists from each generation > N
1486 * we want to *scavenge* these roots, not evacuate them: they're not
1487 * going to move in this GC.
1488 * Also do them in reverse generation order, for the usual reason:
1489 * namely to reduce the likelihood of spurious old->new pointers.
1490 */
1491 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1492 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1493 freeChain_sync(cap->saved_mut_lists[g]);
1494 cap->saved_mut_lists[g] = NULL;
1495 }
1496 }
1497
1498 /* -----------------------------------------------------------------------------
1499 Scavenging the static objects.
1500
1501 Static objects that have been evacuated are chained on gct->static_objects;
1502 we scavenge each one and move it onto gct->scavenged_static_objects.
1503 Static objects are always evacuated to the oldest generation.
1504 -------------------------------------------------------------------------- */
1505
1506 static void
1507 scavenge_static(void)
1508 {
1509 StgClosure* p;
1510 const StgInfoTable *info;
1511
1512 debugTrace(DEBUG_gc, "scavenging static objects");
1513
1514 /* Always evacuate straight to the oldest generation for static
1515 * objects */
1516 gct->evac_gen_no = oldest_gen->no;
1517
1518 /* keep going until we've scavenged all the objects on the linked
1519 list... */
1520
1521 while (1) {
1522
1523 /* get the next static object from the list. Remember, there might
1524 * be more stuff on this list after each evacuation...
1525 * (the list is gct->static_objects, one per GC thread)
1526 */
1527 p = gct->static_objects;
1528 if (p == END_OF_STATIC_LIST) {
1529 break;
1530 }
1531
1532 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1533 info = get_itbl(p);
1534 /*
1535 if (info->type==RBH)
1536 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1537 */
1538 // make sure the info pointer is into text space
1539
1540 /* Take this object *off* the static_objects list,
1541 * and put it on the scavenged_static_objects list.
1542 */
1543 gct->static_objects = *STATIC_LINK(info,p);
1544 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1545 gct->scavenged_static_objects = p;
1546
1547 switch (info -> type) {
1548
1549 case IND_STATIC:
1550 {
1551 StgInd *ind = (StgInd *)p;
1552 evacuate(&ind->indirectee);
1553
1554 /* might fail to evacuate it, in which case we have to pop it
1555 * back on the mutable list of the oldest generation. We
1556 * leave it *on* the scavenged_static_objects list, though,
1557 * in case we visit this object again.
1558 */
1559 if (gct->failed_to_evac) {
1560 gct->failed_to_evac = rtsFalse;
1561 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1562 }
1563 break;
1564 }
1565
1566 case THUNK_STATIC:
1567 scavenge_thunk_srt(info);
1568 break;
1569
1570 case FUN_STATIC:
1571 scavenge_fun_srt(info);
1572 break;
1573
1574 case CONSTR_STATIC:
1575 {
1576 StgPtr q, next;
1577
1578 next = (P_)p->payload + info->layout.payload.ptrs;
1579 // evacuate the pointers
1580 for (q = (P_)p->payload; q < next; q++) {
1581 evacuate((StgClosure **)q);
1582 }
1583 break;
1584 }
1585
1586 default:
1587 barf("scavenge_static: strange closure %d", (int)(info->type));
1588 }
1589
1590 ASSERT(gct->failed_to_evac == rtsFalse);
1591 }
1592 }
1593
1594 /* -----------------------------------------------------------------------------
1595 scavenge a chunk of memory described by a bitmap
1596 -------------------------------------------------------------------------- */
1597
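// In stack-frame and argument bitmaps a clear bit denotes a pointer
// (which must be evacuated) and a set bit denotes a non-pointer word,
// which is why the loops below evacuate when (bitmap & 1) == 0. Note
// that SRT bitmaps (scavenge_srt above) use the opposite convention.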
1598 static void
1599 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
1600 {
1601 nat i, j, b;
1602 StgWord bitmap;
1603
1604 b = 0;
1605
1606 for (i = 0; i < size; b++) {
1607 bitmap = large_bitmap->bitmap[b];
1608 j = stg_min(size-i, BITS_IN(W_));
1609 i += j;
1610 for (; j > 0; j--, p++) {
1611 if ((bitmap & 1) == 0) {
1612 evacuate((StgClosure **)p);
1613 }
1614 bitmap = bitmap >> 1;
1615 }
1616 }
1617 }
1618
1619 STATIC_INLINE StgPtr
1620 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
1621 {
1622 while (size > 0) {
1623 if ((bitmap & 1) == 0) {
1624 evacuate((StgClosure **)p);
1625 }
1626 p++;
1627 bitmap = bitmap >> 1;
1628 size--;
1629 }
1630 return p;
1631 }
1632
1633 /* -----------------------------------------------------------------------------
1634 scavenge_stack walks over a section of stack and evacuates all the
1635 objects pointed to by it. We can use the same code for walking
1636 AP_STACK_UPDs, since these are just sections of copied stack.
1637 -------------------------------------------------------------------------- */
1638
1639 static void
1640 scavenge_stack(StgPtr p, StgPtr stack_end)
1641 {
1642 const StgRetInfoTable* info;
1643 StgWord bitmap;
1644 nat size;
1645
1646 /*
1647 * Each time around this loop, we are looking at a chunk of stack
1648 * that starts with an activation record.
1649 */
1650
1651 while (p < stack_end) {
1652 info = get_ret_itbl((StgClosure *)p);
1653
1654 switch (info->i.type) {
1655
1656 case UPDATE_FRAME:
1657 // In SMP, we can get update frames that point to indirections
1658 // when two threads evaluate the same thunk. We do attempt to
1659 // discover this situation in threadPaused(), but it's
1660 // possible that the following sequence occurs:
1661 //
1662 // A B
1663 // enter T
1664 // enter T
1665 // blackhole T
1666 // update T
1667 // GC
1668 //
1669 // Now T is an indirection, and the update frame is already
1670 // marked on A's stack, so we won't traverse it again in
1671 // threadPaused(). We could traverse the whole stack again
1672 // before GC, but that seems like overkill.
1673 //
1674 // Scavenging this update frame as normal would be disastrous;
1675 // the updatee would end up pointing to the value. So we
1676 // check whether the value after evacuation is a BLACKHOLE,
1677 // and if not, we change the update frame to an stg_enter
1678 // frame that simply returns the value. Hence, blackholing is
1679 // compulsory (otherwise we would have to check for thunks
1680 // too).
1681 //
1682 // Note [upd-black-hole]
1683 // One slight hiccup is that the THUNK_SELECTOR machinery can
1684 // overwrite the updatee with an IND. In parallel GC, this
1685 // could even be happening concurrently, so we can't check for
1686 // the IND. Fortunately if we assume that blackholing is
1687 // happening (either lazy or eager), then we can be sure that
1688 // the updatee is never a THUNK_SELECTOR and we're ok.
1689 // NB. this is a new invariant: blackholing is not optional.
1690 {
1691 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1692 StgClosure *v;
1693
1694 evacuate(&frame->updatee);
1695 v = frame->updatee;
1696 if (GET_CLOSURE_TAG(v) != 0 ||
1697 (get_itbl(v)->type != BLACKHOLE)) {
1698 // blackholing is compulsory, see above.
1699 frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
1700 }
1701 ASSERT(v->header.info != &stg_TSO_info);
1702 p += sizeofW(StgUpdateFrame);
1703 continue;
1704 }
1705
1706 // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
1707 case CATCH_STM_FRAME:
1708 case CATCH_RETRY_FRAME:
1709 case ATOMICALLY_FRAME:
1710 case UNDERFLOW_FRAME:
1711 case STOP_FRAME:
1712 case CATCH_FRAME:
1713 case RET_SMALL:
1714 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1715 size = BITMAP_SIZE(info->i.layout.bitmap);
1716 // NOTE: the payload starts immediately after the info-ptr, we
1717 // don't have an StgHeader in the same sense as a heap closure.
1718 p++;
1719 p = scavenge_small_bitmap(p, size, bitmap);
1720
1721 follow_srt:
1722 if (major_gc)
1723 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
1724 continue;
1725
1726 case RET_BCO: {
1727 StgBCO *bco;
1728 nat size;
1729
1730 p++;
1731 evacuate((StgClosure **)p);
1732 bco = (StgBCO *)*p;
1733 p++;
1734 size = BCO_BITMAP_SIZE(bco);
1735 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1736 p += size;
1737 continue;
1738 }
1739
1740 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1741 case RET_BIG:
1742 {
1743 nat size;
1744
1745 size = GET_LARGE_BITMAP(&info->i)->size;
1746 p++;
1747 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1748 p += size;
1749 // and don't forget to follow the SRT
1750 goto follow_srt;
1751 }
1752
1753 case RET_FUN:
1754 {
1755 StgRetFun *ret_fun = (StgRetFun *)p;
1756 StgFunInfoTable *fun_info;
1757
1758 evacuate(&ret_fun->fun);
1759 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1760 p = scavenge_arg_block(fun_info, ret_fun->payload);
1761 goto follow_srt;
1762 }
1763
1764 default:
1765 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1766 }
1767 }
1768 }
1769
1770 /*-----------------------------------------------------------------------------
1771 scavenge the large object list.
1772
1773 evac_gen_no is set by the caller; we play similar games with it as in
1774 scavenge_block() - see the comment at the top of that function. Most
1775 large objects are (repeatedly) mutable, so most of the time evac_gen_no
1776 will be zero.
1777 --------------------------------------------------------------------------- */
1778
1779 static void
1780 scavenge_large (gen_workspace *ws)
1781 {
1782 bdescr *bd;
1783 StgPtr p;
1784
1785 gct->evac_gen_no = ws->gen->no;
1786
1787 bd = ws->todo_large_objects;
1788
1789 for (; bd != NULL; bd = ws->todo_large_objects) {
1790
1791 // take this object *off* the large objects list and put it on
1792 // the scavenged large objects list. This is so that we can
1793 // treat new_large_objects as a stack and push new objects on
1794 // the front when evacuating.
1795 ws->todo_large_objects = bd->link;
1796
1797 ACQUIRE_SPIN_LOCK(&ws->gen->sync);
1798 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
1799 ws->gen->n_scavenged_large_blocks += bd->blocks;
1800 RELEASE_SPIN_LOCK(&ws->gen->sync);
1801
1802 p = bd->start;
1803 if (scavenge_one(p)) {
1804 if (ws->gen->no > 0) {
1805 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
1806 }
1807 }
1808
1809 // stats
1810 gct->scanned += closure_sizeW((StgClosure*)p);
1811 }
1812 }
1813
1814 /* ----------------------------------------------------------------------------
1815 Look for work to do.
1816
1817 We look for the oldest gen that has either a todo block that can
1818 be scanned, or a block of work on the global queue that we can
1819 scan.
1820
1821 It is important to take work from the *oldest* generation that
1822 has work available, because that minimizes the likelihood of
1823 evacuating objects into a young generation when they should have
1824 been eagerly promoted. This really does make a difference (the
1825 cacheprof benchmark is one that is affected).
1826
1827 We also want to scan the todo block if possible before grabbing
1828 work from the global queue, the reason being that we don't want to
1829 steal work from the global queue and starve other threads if there
1830 is other work we can usefully be doing.
1831 ------------------------------------------------------------------------- */
1832
1833 static rtsBool
1834 scavenge_find_work (void)
1835 {
1836 int g;
1837 gen_workspace *ws;
1838 rtsBool did_something, did_anything;
1839 bdescr *bd;
1840
1841 gct->scav_find_work++;
1842
1843 did_anything = rtsFalse;
1844
1845 loop:
1846 did_something = rtsFalse;
1847 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
1848 ws = &gct->gens[g];
1849
1850 gct->scan_bd = NULL;
1851
1852 // If we have a scan block with some work to do,
1853 // scavenge everything up to the free pointer.
1854 if (ws->todo_bd->u.scan < ws->todo_free)
1855 {
1856 scavenge_block(ws->todo_bd);
1857 did_something = rtsTrue;
1858 break;
1859 }
1860
1861 // If we have any large objects to scavenge, do them now.
1862 if (ws->todo_large_objects) {
1863 scavenge_large(ws);
1864 did_something = rtsTrue;
1865 break;
1866 }
1867
1868 if ((bd = grab_local_todo_block(ws)) != NULL) {
1869 scavenge_block(bd);
1870 did_something = rtsTrue;
1871 break;
1872 }
1873 }
1874
1875 if (did_something) {
1876 did_anything = rtsTrue;
1877 goto loop;
1878 }
1879
1880 #if defined(THREADED_RTS)
1881 if (work_stealing) {
1882 // look for work to steal
1883 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
1884 if ((bd = steal_todo_block(g)) != NULL) {
1885 scavenge_block(bd);
1886 did_something = rtsTrue;
1887 break;
1888 }
1889 }
1890
1891 if (did_something) {
1892 did_anything = rtsTrue;
1893 goto loop;
1894 }
1895 }
1896 #endif
1897
1898 // only return when there is no more work to do
1899
1900 return did_anything;
1901 }
1902
1903 /* ----------------------------------------------------------------------------
1904 Scavenge until we can't find anything more to scavenge.
1905 ------------------------------------------------------------------------- */
1906
1907 void
1908 scavenge_loop(void)
1909 {
1910 rtsBool work_to_do;
1911
1912 loop:
1913 work_to_do = rtsFalse;
1914
1915 // scavenge static objects
1916 if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
1917 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
1918 scavenge_static();
1919 }
1920
1921 // scavenge objects in compacted generation
1922 if (mark_stack_bd != NULL && !mark_stack_empty()) {
1923 scavenge_mark_stack();
1924 work_to_do = rtsTrue;
1925 }
1926
1927 // Order is important here: we want to deal in full blocks as
1928 // much as possible, so go for global work in preference to
1929 // local work. Only if all the global work has been exhausted
1930 // do we start scavenging the fragments of blocks in the local
1931 // workspaces.
1932 if (scavenge_find_work()) goto loop;
1933
1934 if (work_to_do) goto loop;
1935 }
1936