Rts: Consistently use StgWord for sizes of bitmaps
[ghc.git] rts/sm/Scav.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "Storage.h"
18 #include "GC.h"
19 #include "GCThread.h"
20 #include "GCUtils.h"
21 #include "Compact.h"
22 #include "MarkStack.h"
23 #include "Evac.h"
24 #include "Scav.h"
25 #include "Apply.h"
26 #include "Trace.h"
27 #include "Sanity.h"
28 #include "Capability.h"
29 #include "LdvProfile.h"
30
31 static void scavenge_stack (StgPtr p, StgPtr stack_end);
32
33 static void scavenge_large_bitmap (StgPtr p,
34 StgLargeBitmap *large_bitmap,
35 StgWord size );
36
37 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
38 # define evacuate(a) evacuate1(a)
39 # define scavenge_loop(a) scavenge_loop1(a)
40 # define scavenge_block(a) scavenge_block1(a)
41 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
42 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
43 #endif
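// Note: the "1"-suffixed names above are, presumably, the single-threaded
// variants of these routines; the renaming only takes effect when this file
// is built for a THREADED_RTS that is not using parallel GC.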
44
45 /* -----------------------------------------------------------------------------
46 Scavenge a TSO.
47 -------------------------------------------------------------------------- */
48
49 static void
50 scavengeTSO (StgTSO *tso)
51 {
52 rtsBool saved_eager;
53
54 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
55
56 // update the pointer from the InCall.
57 if (tso->bound != NULL) {
58 tso->bound->tso = tso;
59 }
60
61 saved_eager = gct->eager_promotion;
62 gct->eager_promotion = rtsFalse;
63
64 evacuate((StgClosure **)&tso->blocked_exceptions);
65 evacuate((StgClosure **)&tso->bq);
66
67     // scavenge current transaction record
68 evacuate((StgClosure **)&tso->trec);
69
70 evacuate((StgClosure **)&tso->stackobj);
71
72 evacuate((StgClosure **)&tso->_link);
73 if ( tso->why_blocked == BlockedOnMVar
74 || tso->why_blocked == BlockedOnMVarRead
75 || tso->why_blocked == BlockedOnBlackHole
76 || tso->why_blocked == BlockedOnMsgThrowTo
77 || tso->why_blocked == NotBlocked
78 ) {
79 evacuate(&tso->block_info.closure);
80 }
81 #ifdef THREADED_RTS
82 // in the THREADED_RTS, block_info.closure must always point to a
83 // valid closure, because we assume this in throwTo(). In the
84     // non-threaded RTS it might be an FD (for
85 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
86 else {
87 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
88 }
89 #endif
90
91 tso->dirty = gct->failed_to_evac;
92
93 gct->eager_promotion = saved_eager;
94 }
95
96 /* -----------------------------------------------------------------------------
97 Mutable arrays of pointers
98 -------------------------------------------------------------------------- */
99
100 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
101 {
102 W_ m;
103 rtsBool any_failed;
104 StgPtr p, q;
105
106 any_failed = rtsFalse;
107 p = (StgPtr)&a->payload[0];
108 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
109 {
110 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
111 for (; p < q; p++) {
112 evacuate((StgClosure**)p);
113 }
114 if (gct->failed_to_evac) {
115 any_failed = rtsTrue;
116 *mutArrPtrsCard(a,m) = 1;
117 gct->failed_to_evac = rtsFalse;
118 } else {
119 *mutArrPtrsCard(a,m) = 0;
120 }
121 }
122
123 q = (StgPtr)&a->payload[a->ptrs];
124 if (p < q) {
125 for (; p < q; p++) {
126 evacuate((StgClosure**)p);
127 }
128 if (gct->failed_to_evac) {
129 any_failed = rtsTrue;
130 *mutArrPtrsCard(a,m) = 1;
131 gct->failed_to_evac = rtsFalse;
132 } else {
133 *mutArrPtrsCard(a,m) = 0;
134 }
135 }
136
137 gct->failed_to_evac = any_failed;
138 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
139 }
140
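/* Illustration of the card scheme above: each card covers
 * (1 << MUT_ARR_PTRS_CARD_BITS) elements (128 if MUT_ARR_PTRS_CARD_BITS is 7,
 * an assumption not checked here).  For a 300-element array, mutArrPtrsCards
 * gives 3 cards: the main loop in scavenge_mut_arr_ptrs scans the two full
 * cards and the trailing code scans the remaining 44 elements.  A card whose
 * scan hit a failed evacuation keeps its mark, so only those cards need
 * rescanning by the marked variant below.
 */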
141 // scavenge only the marked areas of a MUT_ARR_PTRS
142 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
143 {
144 W_ m;
145 StgPtr p, q;
146 rtsBool any_failed;
147
148 any_failed = rtsFalse;
149 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
150 {
151 if (*mutArrPtrsCard(a,m) != 0) {
152 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
153 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
154 (StgPtr)&a->payload[a->ptrs]);
155 for (; p < q; p++) {
156 evacuate((StgClosure**)p);
157 }
158 if (gct->failed_to_evac) {
159 any_failed = rtsTrue;
160 gct->failed_to_evac = rtsFalse;
161 } else {
162 *mutArrPtrsCard(a,m) = 0;
163 }
164 }
165 }
166
167 gct->failed_to_evac = any_failed;
168 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
169 }
170
171 /* -----------------------------------------------------------------------------
172 Blocks of function args occur on the stack (at the top) and
173 in PAPs.
174 -------------------------------------------------------------------------- */
175
176 STATIC_INLINE StgPtr
177 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
178 {
179 StgPtr p;
180 StgWord bitmap;
181 StgWord size;
182
183 p = (StgPtr)args;
184 switch (fun_info->f.fun_type) {
185 case ARG_GEN:
186 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
187 size = BITMAP_SIZE(fun_info->f.b.bitmap);
188 goto small_bitmap;
189 case ARG_GEN_BIG:
190 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
191 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
192 p += size;
193 break;
194 default:
195 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
196 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
197 small_bitmap:
198 while (size > 0) {
199 if ((bitmap & 1) == 0) {
200 evacuate((StgClosure **)p);
201 }
202 p++;
203 bitmap = bitmap >> 1;
204 size--;
205 }
206 break;
207 }
208 return p;
209 }
210
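/* Example of the small-bitmap convention used in scavenge_arg_block: a 0 bit
 * denotes a pointer word and a 1 bit a non-pointer word, consumed
 * least-significant bit first.  With size = 3 and bitmap = 0b010, words 0
 * and 2 of the argument block are evacuated and word 1 is skipped.
 */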
211 STATIC_INLINE GNUC_ATTR_HOT StgPtr
212 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
213 {
214 StgPtr p;
215 StgWord bitmap;
216 StgFunInfoTable *fun_info;
217
218 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
219 ASSERT(fun_info->i.type != PAP);
220 p = (StgPtr)payload;
221
222 switch (fun_info->f.fun_type) {
223 case ARG_GEN:
224 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
225 goto small_bitmap;
226 case ARG_GEN_BIG:
227 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
228 p += size;
229 break;
230 case ARG_BCO:
231 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
232 p += size;
233 break;
234 default:
235 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
236 small_bitmap:
237 while (size > 0) {
238 if ((bitmap & 1) == 0) {
239 evacuate((StgClosure **)p);
240 }
241 p++;
242 bitmap = bitmap >> 1;
243 size--;
244 }
245 break;
246 }
247 return p;
248 }
249
250 STATIC_INLINE GNUC_ATTR_HOT StgPtr
251 scavenge_PAP (StgPAP *pap)
252 {
253 evacuate(&pap->fun);
254 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
255 }
256
257 STATIC_INLINE StgPtr
258 scavenge_AP (StgAP *ap)
259 {
260 evacuate(&ap->fun);
261 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
262 }
263
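/* PAPs and APs share the same payload layout: the function closure followed
 * by n_args words described by the function's argument bitmap, which is why
 * both are handled by scavenge_PAP_payload.  For an ARG_BCO function the
 * bitmap comes from the BCO itself (BCO_BITMAP).
 */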
264 /* -----------------------------------------------------------------------------
265 Scavenge SRTs
266 -------------------------------------------------------------------------- */
267
268 /* Similar to scavenge_large_bitmap(), but we don't write back the
269 * pointers we get back from evacuate().
270 */
271 static void
272 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
273 {
274 nat i, b, size;
275 StgWord bitmap;
276 StgClosure **p;
277
278 b = 0;
279 bitmap = large_srt->l.bitmap[b];
280 size = (nat)large_srt->l.size;
281 p = (StgClosure **)large_srt->srt;
282 for (i = 0; i < size; ) {
283 if ((bitmap & 1) != 0) {
284 evacuate(p);
285 }
286 i++;
287 p++;
288 if (i % BITS_IN(W_) == 0) {
289 b++;
290 bitmap = large_srt->l.bitmap[b];
291 } else {
292 bitmap = bitmap >> 1;
293 }
294 }
295 }
296
297 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
298 * srt field in the info table. That's ok, because we'll
299 * never dereference it.
300 */
301 STATIC_INLINE GNUC_ATTR_HOT void
302 scavenge_srt (StgClosure **srt, nat srt_bitmap)
303 {
304 nat bitmap;
305 StgClosure **p;
306
307 bitmap = srt_bitmap;
308 p = srt;
309
310 if (bitmap == (StgHalfWord)(-1)) {
311 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
312 return;
313 }
314
315 while (bitmap != 0) {
316 if ((bitmap & 1) != 0) {
317 #if defined(COMPILING_WINDOWS_DLL)
318 // Special-case to handle references to closures hiding out in DLLs, since
319             // double indirections are required to get at those. The code generator knows
320 // which is which when generating the SRT, so it stores the (indirect)
321 // reference to the DLL closure in the table by first adding one to it.
322 // We check for this here, and undo the addition before evacuating it.
323 //
324 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
325 // closure that's fixed at link-time, and no extra magic is required.
326 if ( (W_)(*srt) & 0x1 ) {
327 evacuate( (StgClosure**) ((W_) (*srt) & ~0x1));
328 } else {
329 evacuate(p);
330 }
331 #else
332 evacuate(p);
333 #endif
334 }
335 p++;
336 bitmap = bitmap >> 1;
337 }
338 }
339
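/* Note that the convention here is the reverse of the layout bitmaps above:
 * in an srt_bitmap a 1 bit means the corresponding SRT entry should be
 * scavenged.  For example, srt_bitmap = 0b101 over a three-entry SRT
 * evacuates entries 0 and 2.  An all-ones value ((StgHalfWord)(-1)) instead
 * means the SRT is described by a StgLargeSRT and is handled by
 * scavenge_large_srt_bitmap.
 */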
340
341 STATIC_INLINE GNUC_ATTR_HOT void
342 scavenge_thunk_srt(const StgInfoTable *info)
343 {
344 StgThunkInfoTable *thunk_info;
345
346 if (!major_gc) return;
347
348 thunk_info = itbl_to_thunk_itbl(info);
349 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
350 }
351
352 STATIC_INLINE GNUC_ATTR_HOT void
353 scavenge_fun_srt(const StgInfoTable *info)
354 {
355 StgFunInfoTable *fun_info;
356
357 if (!major_gc) return;
358
359 fun_info = itbl_to_fun_itbl(info);
360 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
361 }
362
363 /* -----------------------------------------------------------------------------
364 Scavenge a block from the given scan pointer up to bd->free.
365
366 evac_gen_no is set by the caller to be either zero (for a step in a
367 generation < N) or G where G is the generation of the step being
368 scavenged.
369
370 We sometimes temporarily change evac_gen_no back to zero if we're
371 scavenging a mutable object where eager promotion isn't such a good
372 idea.
373 -------------------------------------------------------------------------- */
374
375 static GNUC_ATTR_HOT void
376 scavenge_block (bdescr *bd)
377 {
378 StgPtr p, q;
379 StgInfoTable *info;
380 rtsBool saved_eager_promotion;
381 gen_workspace *ws;
382
383 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
384 bd->start, bd->gen_no, bd->u.scan);
385
386 gct->scan_bd = bd;
387 gct->evac_gen_no = bd->gen_no;
388 saved_eager_promotion = gct->eager_promotion;
389 gct->failed_to_evac = rtsFalse;
390
391 ws = &gct->gens[bd->gen->no];
392
393 p = bd->u.scan;
394
395 // we might be evacuating into the very object that we're
396 // scavenging, so we have to check the real bd->free pointer each
397 // time around the loop.
398 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
399
400 ASSERT(bd->link == NULL);
401 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
402 info = get_itbl((StgClosure *)p);
403
404 ASSERT(gct->thunk_selector_depth == 0);
405
406 q = p;
407 switch (info->type) {
408
409 case MVAR_CLEAN:
410 case MVAR_DIRTY:
411 {
412 StgMVar *mvar = ((StgMVar *)p);
413 gct->eager_promotion = rtsFalse;
414 evacuate((StgClosure **)&mvar->head);
415 evacuate((StgClosure **)&mvar->tail);
416 evacuate((StgClosure **)&mvar->value);
417 gct->eager_promotion = saved_eager_promotion;
418
419 if (gct->failed_to_evac) {
420 mvar->header.info = &stg_MVAR_DIRTY_info;
421 } else {
422 mvar->header.info = &stg_MVAR_CLEAN_info;
423 }
424 p += sizeofW(StgMVar);
425 break;
426 }
427
428 case TVAR:
429 {
430 StgTVar *tvar = ((StgTVar *)p);
431 gct->eager_promotion = rtsFalse;
432 evacuate((StgClosure **)&tvar->current_value);
433 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
434 gct->eager_promotion = saved_eager_promotion;
435
436 if (gct->failed_to_evac) {
437 tvar->header.info = &stg_TVAR_DIRTY_info;
438 } else {
439 tvar->header.info = &stg_TVAR_CLEAN_info;
440 }
441 p += sizeofW(StgTVar);
442 break;
443 }
444
445 case FUN_2_0:
446 scavenge_fun_srt(info);
447 evacuate(&((StgClosure *)p)->payload[1]);
448 evacuate(&((StgClosure *)p)->payload[0]);
449 p += sizeofW(StgHeader) + 2;
450 break;
451
452 case THUNK_2_0:
453 scavenge_thunk_srt(info);
454 evacuate(&((StgThunk *)p)->payload[1]);
455 evacuate(&((StgThunk *)p)->payload[0]);
456 p += sizeofW(StgThunk) + 2;
457 break;
458
459 case CONSTR_2_0:
460 evacuate(&((StgClosure *)p)->payload[1]);
461 evacuate(&((StgClosure *)p)->payload[0]);
462 p += sizeofW(StgHeader) + 2;
463 break;
464
465 case THUNK_1_0:
466 scavenge_thunk_srt(info);
467 evacuate(&((StgThunk *)p)->payload[0]);
468 p += sizeofW(StgThunk) + 1;
469 break;
470
471 case FUN_1_0:
472 scavenge_fun_srt(info);
473 case CONSTR_1_0:
474 evacuate(&((StgClosure *)p)->payload[0]);
475 p += sizeofW(StgHeader) + 1;
476 break;
477
478 case THUNK_0_1:
479 scavenge_thunk_srt(info);
480 p += sizeofW(StgThunk) + 1;
481 break;
482
483 case FUN_0_1:
484 scavenge_fun_srt(info);
485 case CONSTR_0_1:
486 p += sizeofW(StgHeader) + 1;
487 break;
488
489 case THUNK_0_2:
490 scavenge_thunk_srt(info);
491 p += sizeofW(StgThunk) + 2;
492 break;
493
494 case FUN_0_2:
495 scavenge_fun_srt(info);
496 case CONSTR_0_2:
497 p += sizeofW(StgHeader) + 2;
498 break;
499
500 case THUNK_1_1:
501 scavenge_thunk_srt(info);
502 evacuate(&((StgThunk *)p)->payload[0]);
503 p += sizeofW(StgThunk) + 2;
504 break;
505
506 case FUN_1_1:
507 scavenge_fun_srt(info);
508 case CONSTR_1_1:
509 evacuate(&((StgClosure *)p)->payload[0]);
510 p += sizeofW(StgHeader) + 2;
511 break;
512
513 case FUN:
514 scavenge_fun_srt(info);
515 goto gen_obj;
516
517 case THUNK:
518 {
519 StgPtr end;
520
521 scavenge_thunk_srt(info);
522 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
523 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
524 evacuate((StgClosure **)p);
525 }
526 p += info->layout.payload.nptrs;
527 break;
528 }
529
530 gen_obj:
531 case CONSTR:
532 case WEAK:
533 case PRIM:
534 {
535 StgPtr end;
536
537 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
538 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
539 evacuate((StgClosure **)p);
540 }
541 p += info->layout.payload.nptrs;
542 break;
543 }
544
545 case BCO: {
546 StgBCO *bco = (StgBCO *)p;
547 evacuate((StgClosure **)&bco->instrs);
548 evacuate((StgClosure **)&bco->literals);
549 evacuate((StgClosure **)&bco->ptrs);
550 p += bco_sizeW(bco);
551 break;
552 }
553
554 case IND_PERM:
555 case BLACKHOLE:
556 evacuate(&((StgInd *)p)->indirectee);
557 p += sizeofW(StgInd);
558 break;
559
560 case MUT_VAR_CLEAN:
561 case MUT_VAR_DIRTY:
562 gct->eager_promotion = rtsFalse;
563 evacuate(&((StgMutVar *)p)->var);
564 gct->eager_promotion = saved_eager_promotion;
565
566 if (gct->failed_to_evac) {
567 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
568 } else {
569 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
570 }
571 p += sizeofW(StgMutVar);
572 break;
573
574 case BLOCKING_QUEUE:
575 {
576 StgBlockingQueue *bq = (StgBlockingQueue *)p;
577
578 gct->eager_promotion = rtsFalse;
579 evacuate(&bq->bh);
580 evacuate((StgClosure**)&bq->owner);
581 evacuate((StgClosure**)&bq->queue);
582 evacuate((StgClosure**)&bq->link);
583 gct->eager_promotion = saved_eager_promotion;
584
585 if (gct->failed_to_evac) {
586 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
587 } else {
588 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
589 }
590 p += sizeofW(StgBlockingQueue);
591 break;
592 }
593
594 case THUNK_SELECTOR:
595 {
596 StgSelector *s = (StgSelector *)p;
597 evacuate(&s->selectee);
598 p += THUNK_SELECTOR_sizeW();
599 break;
600 }
601
602 // A chunk of stack saved in a heap object
603 case AP_STACK:
604 {
605 StgAP_STACK *ap = (StgAP_STACK *)p;
606
607 evacuate(&ap->fun);
608 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
609 p = (StgPtr)ap->payload + ap->size;
610 break;
611 }
612
613 case PAP:
614 p = scavenge_PAP((StgPAP *)p);
615 break;
616
617 case AP:
618 p = scavenge_AP((StgAP *)p);
619 break;
620
621 case ARR_WORDS:
622 // nothing to follow
623 p += arr_words_sizeW((StgArrWords *)p);
624 break;
625
626 case MUT_ARR_PTRS_CLEAN:
627 case MUT_ARR_PTRS_DIRTY:
628 {
629 // We don't eagerly promote objects pointed to by a mutable
630 // array, but if we find the array only points to objects in
631 // the same or an older generation, we mark it "clean" and
632 // avoid traversing it during minor GCs.
633 gct->eager_promotion = rtsFalse;
634
635 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
636
637 if (gct->failed_to_evac) {
638 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
639 } else {
640 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
641 }
642
643 gct->eager_promotion = saved_eager_promotion;
644 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
645 break;
646 }
647
648 case MUT_ARR_PTRS_FROZEN:
649 case MUT_ARR_PTRS_FROZEN0:
650 // follow everything
651 {
652 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
653
654 // If we're going to put this object on the mutable list, then
655 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
656 if (gct->failed_to_evac) {
657 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
658 } else {
659 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
660 }
661 break;
662 }
663
664 case SMALL_MUT_ARR_PTRS_CLEAN:
665 case SMALL_MUT_ARR_PTRS_DIRTY:
666 // follow everything
667 {
668 StgPtr next;
669
670 // We don't eagerly promote objects pointed to by a mutable
671 // array, but if we find the array only points to objects in
672 // the same or an older generation, we mark it "clean" and
673 // avoid traversing it during minor GCs.
674 gct->eager_promotion = rtsFalse;
675 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
676 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
677 evacuate((StgClosure **)p);
678 }
679 gct->eager_promotion = saved_eager_promotion;
680
681 if (gct->failed_to_evac) {
682 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
683 } else {
684 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
685 }
686
687 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
688 break;
689 }
690
691 case SMALL_MUT_ARR_PTRS_FROZEN:
692 case SMALL_MUT_ARR_PTRS_FROZEN0:
693 // follow everything
694 {
695 StgPtr next;
696
697 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
698 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
699 evacuate((StgClosure **)p);
700 }
701
702 // If we're going to put this object on the mutable list, then
703 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
704 if (gct->failed_to_evac) {
705 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
706 } else {
707 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
708 }
709 break;
710 }
711
712 case TSO:
713 {
714 scavengeTSO((StgTSO *)p);
715 p += sizeofW(StgTSO);
716 break;
717 }
718
719 case STACK:
720 {
721 StgStack *stack = (StgStack*)p;
722
723 gct->eager_promotion = rtsFalse;
724
725 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
726 stack->dirty = gct->failed_to_evac;
727 p += stack_sizeW(stack);
728
729 gct->eager_promotion = saved_eager_promotion;
730 break;
731 }
732
733 case MUT_PRIM:
734 {
735 StgPtr end;
736
737 gct->eager_promotion = rtsFalse;
738
739 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
740 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
741 evacuate((StgClosure **)p);
742 }
743 p += info->layout.payload.nptrs;
744
745 gct->eager_promotion = saved_eager_promotion;
746 gct->failed_to_evac = rtsTrue; // mutable
747 break;
748 }
749
750 case TREC_CHUNK:
751 {
752 StgWord i;
753 StgTRecChunk *tc = ((StgTRecChunk *) p);
754 TRecEntry *e = &(tc -> entries[0]);
755 gct->eager_promotion = rtsFalse;
756 evacuate((StgClosure **)&tc->prev_chunk);
757 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
758 evacuate((StgClosure **)&e->tvar);
759 evacuate((StgClosure **)&e->expected_value);
760 evacuate((StgClosure **)&e->new_value);
761 }
762 gct->eager_promotion = saved_eager_promotion;
763 gct->failed_to_evac = rtsTrue; // mutable
764 p += sizeofW(StgTRecChunk);
765 break;
766 }
767
768 default:
769 barf("scavenge: unimplemented/strange closure type %d @ %p",
770 info->type, p);
771 }
772
773 /*
774 * We need to record the current object on the mutable list if
775 * (a) It is actually mutable, or
776 * (b) It contains pointers to a younger generation.
777 * Case (b) arises if we didn't manage to promote everything that
778 * the current object points to into the current generation.
779 */
780 if (gct->failed_to_evac) {
781 gct->failed_to_evac = rtsFalse;
782 if (bd->gen_no > 0) {
783 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
784 }
785 }
786 }
787
788 if (p > bd->free) {
789 gct->copied += ws->todo_free - bd->free;
790 bd->free = p;
791 }
792
793 debugTrace(DEBUG_gc, " scavenged %ld bytes",
794 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
795
796 // update stats: this is a block that has been scavenged
797 gct->scanned += bd->free - bd->u.scan;
798 bd->u.scan = bd->free;
799
800 if (bd != ws->todo_bd) {
801 // we're not going to evac any more objects into
802 // this block, so push it now.
803 push_scanned_block(bd, ws);
804 }
805
806 gct->scan_bd = NULL;
807 }
808 /* -----------------------------------------------------------------------------
809 Scavenge everything on the mark stack.
810
811 This is slightly different from scavenge():
812 - we don't walk linearly through the objects, so the scavenger
813 doesn't need to advance the pointer on to the next object.
814 -------------------------------------------------------------------------- */
815
816 static void
817 scavenge_mark_stack(void)
818 {
819 StgPtr p, q;
820 StgInfoTable *info;
821 rtsBool saved_eager_promotion;
822
823 gct->evac_gen_no = oldest_gen->no;
824 saved_eager_promotion = gct->eager_promotion;
825
826 while ((p = pop_mark_stack())) {
827
828 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
829 info = get_itbl((StgClosure *)p);
830
831 q = p;
832 switch (info->type) {
833
834 case MVAR_CLEAN:
835 case MVAR_DIRTY:
836 {
837 StgMVar *mvar = ((StgMVar *)p);
838 gct->eager_promotion = rtsFalse;
839 evacuate((StgClosure **)&mvar->head);
840 evacuate((StgClosure **)&mvar->tail);
841 evacuate((StgClosure **)&mvar->value);
842 gct->eager_promotion = saved_eager_promotion;
843
844 if (gct->failed_to_evac) {
845 mvar->header.info = &stg_MVAR_DIRTY_info;
846 } else {
847 mvar->header.info = &stg_MVAR_CLEAN_info;
848 }
849 break;
850 }
851
852 case TVAR:
853 {
854 StgTVar *tvar = ((StgTVar *)p);
855 gct->eager_promotion = rtsFalse;
856 evacuate((StgClosure **)&tvar->current_value);
857 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
858 gct->eager_promotion = saved_eager_promotion;
859
860 if (gct->failed_to_evac) {
861 tvar->header.info = &stg_TVAR_DIRTY_info;
862 } else {
863 tvar->header.info = &stg_TVAR_CLEAN_info;
864 }
865 break;
866 }
867
868 case FUN_2_0:
869 scavenge_fun_srt(info);
870 evacuate(&((StgClosure *)p)->payload[1]);
871 evacuate(&((StgClosure *)p)->payload[0]);
872 break;
873
874 case THUNK_2_0:
875 scavenge_thunk_srt(info);
876 evacuate(&((StgThunk *)p)->payload[1]);
877 evacuate(&((StgThunk *)p)->payload[0]);
878 break;
879
880 case CONSTR_2_0:
881 evacuate(&((StgClosure *)p)->payload[1]);
882 evacuate(&((StgClosure *)p)->payload[0]);
883 break;
884
885 case FUN_1_0:
886 case FUN_1_1:
887 scavenge_fun_srt(info);
888 evacuate(&((StgClosure *)p)->payload[0]);
889 break;
890
891 case THUNK_1_0:
892 case THUNK_1_1:
893 scavenge_thunk_srt(info);
894 evacuate(&((StgThunk *)p)->payload[0]);
895 break;
896
897 case CONSTR_1_0:
898 case CONSTR_1_1:
899 evacuate(&((StgClosure *)p)->payload[0]);
900 break;
901
902 case FUN_0_1:
903 case FUN_0_2:
904 scavenge_fun_srt(info);
905 break;
906
907 case THUNK_0_1:
908 case THUNK_0_2:
909 scavenge_thunk_srt(info);
910 break;
911
912 case CONSTR_0_1:
913 case CONSTR_0_2:
914 break;
915
916 case FUN:
917 scavenge_fun_srt(info);
918 goto gen_obj;
919
920 case THUNK:
921 {
922 StgPtr end;
923
924 scavenge_thunk_srt(info);
925 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
926 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
927 evacuate((StgClosure **)p);
928 }
929 break;
930 }
931
932 gen_obj:
933 case CONSTR:
934 case WEAK:
935 case PRIM:
936 {
937 StgPtr end;
938
939 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
940 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
941 evacuate((StgClosure **)p);
942 }
943 break;
944 }
945
946 case BCO: {
947 StgBCO *bco = (StgBCO *)p;
948 evacuate((StgClosure **)&bco->instrs);
949 evacuate((StgClosure **)&bco->literals);
950 evacuate((StgClosure **)&bco->ptrs);
951 break;
952 }
953
954 case IND_PERM:
955 // don't need to do anything here: the only possible case
956 // is that we're in a 1-space compacting collector, with
957 // no "old" generation.
958 break;
959
960 case IND:
961 case BLACKHOLE:
962 evacuate(&((StgInd *)p)->indirectee);
963 break;
964
965 case MUT_VAR_CLEAN:
966 case MUT_VAR_DIRTY: {
967 gct->eager_promotion = rtsFalse;
968 evacuate(&((StgMutVar *)p)->var);
969 gct->eager_promotion = saved_eager_promotion;
970
971 if (gct->failed_to_evac) {
972 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
973 } else {
974 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
975 }
976 break;
977 }
978
979 case BLOCKING_QUEUE:
980 {
981 StgBlockingQueue *bq = (StgBlockingQueue *)p;
982
983 gct->eager_promotion = rtsFalse;
984 evacuate(&bq->bh);
985 evacuate((StgClosure**)&bq->owner);
986 evacuate((StgClosure**)&bq->queue);
987 evacuate((StgClosure**)&bq->link);
988 gct->eager_promotion = saved_eager_promotion;
989
990 if (gct->failed_to_evac) {
991 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
992 } else {
993 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
994 }
995 break;
996 }
997
998 case ARR_WORDS:
999 break;
1000
1001 case THUNK_SELECTOR:
1002 {
1003 StgSelector *s = (StgSelector *)p;
1004 evacuate(&s->selectee);
1005 break;
1006 }
1007
1008 // A chunk of stack saved in a heap object
1009 case AP_STACK:
1010 {
1011 StgAP_STACK *ap = (StgAP_STACK *)p;
1012
1013 evacuate(&ap->fun);
1014 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1015 break;
1016 }
1017
1018 case PAP:
1019 scavenge_PAP((StgPAP *)p);
1020 break;
1021
1022 case AP:
1023 scavenge_AP((StgAP *)p);
1024 break;
1025
1026 case MUT_ARR_PTRS_CLEAN:
1027 case MUT_ARR_PTRS_DIRTY:
1028 // follow everything
1029 {
1030 // We don't eagerly promote objects pointed to by a mutable
1031 // array, but if we find the array only points to objects in
1032 // the same or an older generation, we mark it "clean" and
1033 // avoid traversing it during minor GCs.
1034 gct->eager_promotion = rtsFalse;
1035
1036 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1037
1038 if (gct->failed_to_evac) {
1039 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1040 } else {
1041 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1042 }
1043
1044 gct->eager_promotion = saved_eager_promotion;
1045 gct->failed_to_evac = rtsTrue; // mutable anyhow.
1046 break;
1047 }
1048
1049 case MUT_ARR_PTRS_FROZEN:
1050 case MUT_ARR_PTRS_FROZEN0:
1051 // follow everything
1052 {
1053 StgPtr q = p;
1054
1055 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1056
1057 // If we're going to put this object on the mutable list, then
1058 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1059 if (gct->failed_to_evac) {
1060 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1061 } else {
1062 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1063 }
1064 break;
1065 }
1066
1067 case SMALL_MUT_ARR_PTRS_CLEAN:
1068 case SMALL_MUT_ARR_PTRS_DIRTY:
1069 // follow everything
1070 {
1071 StgPtr next;
1072 rtsBool saved_eager;
1073
1074 // We don't eagerly promote objects pointed to by a mutable
1075 // array, but if we find the array only points to objects in
1076 // the same or an older generation, we mark it "clean" and
1077 // avoid traversing it during minor GCs.
1078 saved_eager = gct->eager_promotion;
1079 gct->eager_promotion = rtsFalse;
1080 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1081 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1082 evacuate((StgClosure **)p);
1083 }
1084 gct->eager_promotion = saved_eager;
1085
1086 if (gct->failed_to_evac) {
1087 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1088 } else {
1089 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1090 }
1091
1092 gct->failed_to_evac = rtsTrue; // mutable anyhow.
1093 break;
1094 }
1095
1096 case SMALL_MUT_ARR_PTRS_FROZEN:
1097 case SMALL_MUT_ARR_PTRS_FROZEN0:
1098 // follow everything
1099 {
1100 StgPtr next, q = p;
1101
1102 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1103 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1104 evacuate((StgClosure **)p);
1105 }
1106
1107 // If we're going to put this object on the mutable list, then
1108 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1109 if (gct->failed_to_evac) {
1110 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1111 } else {
1112 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1113 }
1114 break;
1115 }
1116
1117 case TSO:
1118 {
1119 scavengeTSO((StgTSO*)p);
1120 break;
1121 }
1122
1123 case STACK:
1124 {
1125 StgStack *stack = (StgStack*)p;
1126
1127 gct->eager_promotion = rtsFalse;
1128
1129 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1130 stack->dirty = gct->failed_to_evac;
1131
1132 gct->eager_promotion = saved_eager_promotion;
1133 break;
1134 }
1135
1136 case MUT_PRIM:
1137 {
1138 StgPtr end;
1139
1140 gct->eager_promotion = rtsFalse;
1141
1142 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1143 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1144 evacuate((StgClosure **)p);
1145 }
1146
1147 gct->eager_promotion = saved_eager_promotion;
1148 gct->failed_to_evac = rtsTrue; // mutable
1149 break;
1150 }
1151
1152 case TREC_CHUNK:
1153 {
1154 StgWord i;
1155 StgTRecChunk *tc = ((StgTRecChunk *) p);
1156 TRecEntry *e = &(tc -> entries[0]);
1157 gct->eager_promotion = rtsFalse;
1158 evacuate((StgClosure **)&tc->prev_chunk);
1159 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1160 evacuate((StgClosure **)&e->tvar);
1161 evacuate((StgClosure **)&e->expected_value);
1162 evacuate((StgClosure **)&e->new_value);
1163 }
1164 gct->eager_promotion = saved_eager_promotion;
1165 gct->failed_to_evac = rtsTrue; // mutable
1166 break;
1167 }
1168
1169 default:
1170 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1171 info->type, p);
1172 }
1173
1174 if (gct->failed_to_evac) {
1175 gct->failed_to_evac = rtsFalse;
1176 if (gct->evac_gen_no) {
1177 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1178 }
1179 }
1180 } // while (p = pop_mark_stack())
1181 }
1182
1183 /* -----------------------------------------------------------------------------
1184 Scavenge one object.
1185
1186 This is used for objects that are temporarily marked as mutable
1187 because they contain old-to-new generation pointers. Only certain
1188 objects can have this property.
1189 -------------------------------------------------------------------------- */
1190
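// Returns rtsTrue if something the object points to could not be evacuated
// to the desired generation ("no luck"); callers such as
// scavenge_mutable_list and scavenge_large use this to decide whether the
// object must stay on the mutable list.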
1191 static rtsBool
1192 scavenge_one(StgPtr p)
1193 {
1194 const StgInfoTable *info;
1195 rtsBool no_luck;
1196 rtsBool saved_eager_promotion;
1197
1198 saved_eager_promotion = gct->eager_promotion;
1199
1200 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1201 info = get_itbl((StgClosure *)p);
1202
1203 switch (info->type) {
1204
1205 case MVAR_CLEAN:
1206 case MVAR_DIRTY:
1207 {
1208 StgMVar *mvar = ((StgMVar *)p);
1209 gct->eager_promotion = rtsFalse;
1210 evacuate((StgClosure **)&mvar->head);
1211 evacuate((StgClosure **)&mvar->tail);
1212 evacuate((StgClosure **)&mvar->value);
1213 gct->eager_promotion = saved_eager_promotion;
1214
1215 if (gct->failed_to_evac) {
1216 mvar->header.info = &stg_MVAR_DIRTY_info;
1217 } else {
1218 mvar->header.info = &stg_MVAR_CLEAN_info;
1219 }
1220 break;
1221 }
1222
1223 case TVAR:
1224 {
1225 StgTVar *tvar = ((StgTVar *)p);
1226 gct->eager_promotion = rtsFalse;
1227 evacuate((StgClosure **)&tvar->current_value);
1228 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1229 gct->eager_promotion = saved_eager_promotion;
1230
1231 if (gct->failed_to_evac) {
1232 tvar->header.info = &stg_TVAR_DIRTY_info;
1233 } else {
1234 tvar->header.info = &stg_TVAR_CLEAN_info;
1235 }
1236 break;
1237 }
1238
1239 case THUNK:
1240 case THUNK_1_0:
1241 case THUNK_0_1:
1242 case THUNK_1_1:
1243 case THUNK_0_2:
1244 case THUNK_2_0:
1245 {
1246 StgPtr q, end;
1247
1248 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1249 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1250 evacuate((StgClosure **)q);
1251 }
1252 break;
1253 }
1254
1255 case FUN:
1256 case FUN_1_0: // hardly worth specialising these guys
1257 case FUN_0_1:
1258 case FUN_1_1:
1259 case FUN_0_2:
1260 case FUN_2_0:
1261 case CONSTR:
1262 case CONSTR_1_0:
1263 case CONSTR_0_1:
1264 case CONSTR_1_1:
1265 case CONSTR_0_2:
1266 case CONSTR_2_0:
1267 case WEAK:
1268 case PRIM:
1269 case IND_PERM:
1270 {
1271 StgPtr q, end;
1272
1273 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1274 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1275 evacuate((StgClosure **)q);
1276 }
1277 break;
1278 }
1279
1280 case MUT_VAR_CLEAN:
1281 case MUT_VAR_DIRTY: {
1282 StgPtr q = p;
1283
1284 gct->eager_promotion = rtsFalse;
1285 evacuate(&((StgMutVar *)p)->var);
1286 gct->eager_promotion = saved_eager_promotion;
1287
1288 if (gct->failed_to_evac) {
1289 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1290 } else {
1291 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1292 }
1293 break;
1294 }
1295
1296 case BLOCKING_QUEUE:
1297 {
1298 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1299
1300 gct->eager_promotion = rtsFalse;
1301 evacuate(&bq->bh);
1302 evacuate((StgClosure**)&bq->owner);
1303 evacuate((StgClosure**)&bq->queue);
1304 evacuate((StgClosure**)&bq->link);
1305 gct->eager_promotion = saved_eager_promotion;
1306
1307 if (gct->failed_to_evac) {
1308 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1309 } else {
1310 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1311 }
1312 break;
1313 }
1314
1315 case THUNK_SELECTOR:
1316 {
1317 StgSelector *s = (StgSelector *)p;
1318 evacuate(&s->selectee);
1319 break;
1320 }
1321
1322 case AP_STACK:
1323 {
1324 StgAP_STACK *ap = (StgAP_STACK *)p;
1325
1326 evacuate(&ap->fun);
1327 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1328 p = (StgPtr)ap->payload + ap->size;
1329 break;
1330 }
1331
1332 case PAP:
1333 p = scavenge_PAP((StgPAP *)p);
1334 break;
1335
1336 case AP:
1337 p = scavenge_AP((StgAP *)p);
1338 break;
1339
1340 case ARR_WORDS:
1341 // nothing to follow
1342 break;
1343
1344 case MUT_ARR_PTRS_CLEAN:
1345 case MUT_ARR_PTRS_DIRTY:
1346 {
1347 // We don't eagerly promote objects pointed to by a mutable
1348 // array, but if we find the array only points to objects in
1349 // the same or an older generation, we mark it "clean" and
1350 // avoid traversing it during minor GCs.
1351 gct->eager_promotion = rtsFalse;
1352
1353 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1354
1355 if (gct->failed_to_evac) {
1356 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1357 } else {
1358 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1359 }
1360
1361 gct->eager_promotion = saved_eager_promotion;
1362 gct->failed_to_evac = rtsTrue;
1363 break;
1364 }
1365
1366 case MUT_ARR_PTRS_FROZEN:
1367 case MUT_ARR_PTRS_FROZEN0:
1368 {
1369 // follow everything
1370 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1371
1372 // If we're going to put this object on the mutable list, then
1373 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1374 if (gct->failed_to_evac) {
1375 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1376 } else {
1377 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1378 }
1379 break;
1380 }
1381
1382 case SMALL_MUT_ARR_PTRS_CLEAN:
1383 case SMALL_MUT_ARR_PTRS_DIRTY:
1384 {
1385 StgPtr next, q;
1386 rtsBool saved_eager;
1387
1388 // We don't eagerly promote objects pointed to by a mutable
1389 // array, but if we find the array only points to objects in
1390 // the same or an older generation, we mark it "clean" and
1391 // avoid traversing it during minor GCs.
1392 saved_eager = gct->eager_promotion;
1393 gct->eager_promotion = rtsFalse;
1394 q = p;
1395 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1396 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1397 evacuate((StgClosure **)p);
1398 }
1399 gct->eager_promotion = saved_eager;
1400
1401 if (gct->failed_to_evac) {
1402 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1403 } else {
1404 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1405 }
1406
1407 gct->failed_to_evac = rtsTrue;
1408 break;
1409 }
1410
1411 case SMALL_MUT_ARR_PTRS_FROZEN:
1412 case SMALL_MUT_ARR_PTRS_FROZEN0:
1413 {
1414 // follow everything
1415 StgPtr next, q=p;
1416
1417 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1418 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1419 evacuate((StgClosure **)p);
1420 }
1421
1422 // If we're going to put this object on the mutable list, then
1423 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1424 if (gct->failed_to_evac) {
1425 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1426 } else {
1427 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1428 }
1429 break;
1430 }
1431
1432 case TSO:
1433 {
1434 scavengeTSO((StgTSO*)p);
1435 break;
1436 }
1437
1438 case STACK:
1439 {
1440 StgStack *stack = (StgStack*)p;
1441
1442 gct->eager_promotion = rtsFalse;
1443
1444 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1445 stack->dirty = gct->failed_to_evac;
1446
1447 gct->eager_promotion = saved_eager_promotion;
1448 break;
1449 }
1450
1451 case MUT_PRIM:
1452 {
1453 StgPtr end;
1454
1455 gct->eager_promotion = rtsFalse;
1456
1457 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1458 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1459 evacuate((StgClosure **)p);
1460 }
1461
1462 gct->eager_promotion = saved_eager_promotion;
1463 gct->failed_to_evac = rtsTrue; // mutable
1464 break;
1465
1466 }
1467
1468 case TREC_CHUNK:
1469 {
1470 StgWord i;
1471 StgTRecChunk *tc = ((StgTRecChunk *) p);
1472 TRecEntry *e = &(tc -> entries[0]);
1473 gct->eager_promotion = rtsFalse;
1474 evacuate((StgClosure **)&tc->prev_chunk);
1475 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1476 evacuate((StgClosure **)&e->tvar);
1477 evacuate((StgClosure **)&e->expected_value);
1478 evacuate((StgClosure **)&e->new_value);
1479 }
1480 gct->eager_promotion = saved_eager_promotion;
1481 gct->failed_to_evac = rtsTrue; // mutable
1482 break;
1483 }
1484
1485 case IND:
1486 // IND can happen, for example, when the interpreter allocates
1487 // a gigantic AP closure (more than one block), which ends up
1488 // on the large-object list and then gets updated. See #3424.
1489 case BLACKHOLE:
1490 case IND_STATIC:
1491 evacuate(&((StgInd *)p)->indirectee);
1492
1493 #if 0 && defined(DEBUG)
1494 if (RtsFlags.DebugFlags.gc)
1495 /* Debugging code to print out the size of the thing we just
1496 * promoted
1497 */
1498 {
1499 StgPtr start = gen->scan;
1500 bdescr *start_bd = gen->scan_bd;
1501 StgWord size = 0;
1502 scavenge(&gen);
1503 if (start_bd != gen->scan_bd) {
1504 size += (P_)BLOCK_ROUND_UP(start) - start;
1505 start_bd = start_bd->link;
1506 while (start_bd != gen->scan_bd) {
1507 size += BLOCK_SIZE_W;
1508 start_bd = start_bd->link;
1509 }
1510 size += gen->scan -
1511 (P_)BLOCK_ROUND_DOWN(gen->scan);
1512 } else {
1513 size = gen->scan - start;
1514 }
1515 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1516 }
1517 #endif
1518 break;
1519
1520 default:
1521 barf("scavenge_one: strange object %d", (int)(info->type));
1522 }
1523
1524 no_luck = gct->failed_to_evac;
1525 gct->failed_to_evac = rtsFalse;
1526 return (no_luck);
1527 }
1528
1529 /* -----------------------------------------------------------------------------
1530 Scavenging mutable lists.
1531
1532 We treat the mutable list of each generation > N (i.e. all the
1533 generations older than the one being collected) as roots. We also
1534 remove non-mutable objects from the mutable list at this point.
1535 -------------------------------------------------------------------------- */
1536
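// For example, a MUT_VAR living in generation 1 that has been written to
// point at a freshly allocated generation-0 closure must appear on gen 1's
// mutable list; otherwise a minor collection of gen 0 would never find that
// closure through the roots.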
1537 void
1538 scavenge_mutable_list(bdescr *bd, generation *gen)
1539 {
1540 StgPtr p, q;
1541 nat gen_no;
1542
1543 gen_no = gen->no;
1544 gct->evac_gen_no = gen_no;
1545 for (; bd != NULL; bd = bd->link) {
1546 for (q = bd->start; q < bd->free; q++) {
1547 p = (StgPtr)*q;
1548 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1549
1550 #ifdef DEBUG
1551 switch (get_itbl((StgClosure *)p)->type) {
1552 case MUT_VAR_CLEAN:
1553 // can happen due to concurrent writeMutVars
1554 case MUT_VAR_DIRTY:
1555 mutlist_MUTVARS++; break;
1556 case MUT_ARR_PTRS_CLEAN:
1557 case MUT_ARR_PTRS_DIRTY:
1558 case MUT_ARR_PTRS_FROZEN:
1559 case MUT_ARR_PTRS_FROZEN0:
1560 mutlist_MUTARRS++; break;
1561 case MVAR_CLEAN:
1562 barf("MVAR_CLEAN on mutable list");
1563 case MVAR_DIRTY:
1564 mutlist_MVARS++; break;
1565 case TVAR:
1566 mutlist_TVAR++; break;
1567 case TREC_CHUNK:
1568 mutlist_TREC_CHUNK++; break;
1569 case MUT_PRIM:
1570 if (((StgClosure*)p)->header.info == &stg_TVAR_WATCH_QUEUE_info)
1571 mutlist_TVAR_WATCH_QUEUE++;
1572 else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
1573 mutlist_TREC_HEADER++;
1574 else if (((StgClosure*)p)->header.info == &stg_ATOMIC_INVARIANT_info)
1575 mutlist_ATOMIC_INVARIANT++;
1576 else if (((StgClosure*)p)->header.info == &stg_INVARIANT_CHECK_QUEUE_info)
1577 mutlist_INVARIANT_CHECK_QUEUE++;
1578 else
1579 mutlist_OTHERS++;
1580 break;
1581 default:
1582 mutlist_OTHERS++; break;
1583 }
1584 #endif
1585
1586             // Check whether this object is "clean", that is, it
1587             // definitely doesn't point into a young generation.
1588             // Clean objects don't need to be scavenged. Some clean
1589             // objects (MUT_VAR_CLEAN) are not kept on the mutable
1590             // list at all; others, such as MUT_ARR_PTRS,
1591             // are always on the mutable list.
1592 //
1593 switch (get_itbl((StgClosure *)p)->type) {
1594 case MUT_ARR_PTRS_CLEAN:
1595 recordMutableGen_GC((StgClosure *)p,gen_no);
1596 continue;
1597 case MUT_ARR_PTRS_DIRTY:
1598 {
1599 rtsBool saved_eager_promotion;
1600 saved_eager_promotion = gct->eager_promotion;
1601 gct->eager_promotion = rtsFalse;
1602
1603 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1604
1605 if (gct->failed_to_evac) {
1606 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1607 } else {
1608 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1609 }
1610
1611 gct->eager_promotion = saved_eager_promotion;
1612 gct->failed_to_evac = rtsFalse;
1613 recordMutableGen_GC((StgClosure *)p,gen_no);
1614 continue;
1615 }
1616 default:
1617 ;
1618 }
1619
1620 if (scavenge_one(p)) {
1621 // didn't manage to promote everything, so put the
1622 // object back on the list.
1623 recordMutableGen_GC((StgClosure *)p,gen_no);
1624 }
1625 }
1626 }
1627 }
1628
1629 void
1630 scavenge_capability_mut_lists (Capability *cap)
1631 {
1632 nat g;
1633
1634 /* Mutable lists from each generation > N
1635 * we want to *scavenge* these roots, not evacuate them: they're not
1636 * going to move in this GC.
1637 * Also do them in reverse generation order, for the usual reason:
1638 * namely to reduce the likelihood of spurious old->new pointers.
1639 */
1640 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1641 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1642 freeChain_sync(cap->saved_mut_lists[g]);
1643 cap->saved_mut_lists[g] = NULL;
1644 }
1645 }
1646
1647 /* -----------------------------------------------------------------------------
1648 Scavenging the static objects.
1649
1650    Static objects reachable from the roots are scavenged here, always
1651    evacuating into the oldest generation.  Each object is moved from the
1652    static_objects list onto the scavenged_static_objects list as it is done.
1653 -------------------------------------------------------------------------- */
1654
1655 static void
1656 scavenge_static(void)
1657 {
1658 StgClosure* p;
1659 const StgInfoTable *info;
1660
1661 debugTrace(DEBUG_gc, "scavenging static objects");
1662
1663 /* Always evacuate straight to the oldest generation for static
1664 * objects */
1665 gct->evac_gen_no = oldest_gen->no;
1666
1667 /* keep going until we've scavenged all the objects on the linked
1668 list... */
1669
1670 while (1) {
1671
1672 /* get the next static object from the list. Remember, there might
1673 * be more stuff on this list after each evacuation...
1674 * (static_objects is a global)
1675 */
1676 p = gct->static_objects;
1677 if (p == END_OF_STATIC_LIST) {
1678 break;
1679 }
1680
1681 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1682 info = get_itbl(p);
1683 /*
1684 if (info->type==RBH)
1685 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1686 */
1687 // make sure the info pointer is into text space
1688
1689 /* Take this object *off* the static_objects list,
1690 * and put it on the scavenged_static_objects list.
1691 */
1692 gct->static_objects = *STATIC_LINK(info,p);
1693 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1694 gct->scavenged_static_objects = p;
1695
1696 switch (info -> type) {
1697
1698 case IND_STATIC:
1699 {
1700 StgInd *ind = (StgInd *)p;
1701 evacuate(&ind->indirectee);
1702
1703 /* might fail to evacuate it, in which case we have to pop it
1704 * back on the mutable list of the oldest generation. We
1705 * leave it *on* the scavenged_static_objects list, though,
1706 * in case we visit this object again.
1707 */
1708 if (gct->failed_to_evac) {
1709 gct->failed_to_evac = rtsFalse;
1710 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1711 }
1712 break;
1713 }
1714
1715 case THUNK_STATIC:
1716 scavenge_thunk_srt(info);
1717 break;
1718
1719 case FUN_STATIC:
1720 scavenge_fun_srt(info);
1721 break;
1722
1723 case CONSTR_STATIC:
1724 {
1725 StgPtr q, next;
1726
1727 next = (P_)p->payload + info->layout.payload.ptrs;
1728 // evacuate the pointers
1729 for (q = (P_)p->payload; q < next; q++) {
1730 evacuate((StgClosure **)q);
1731 }
1732 break;
1733 }
1734
1735 default:
1736 barf("scavenge_static: strange closure %d", (int)(info->type));
1737 }
1738
1739 ASSERT(gct->failed_to_evac == rtsFalse);
1740 }
1741 }
1742
1743 /* -----------------------------------------------------------------------------
1744 scavenge a chunk of memory described by a bitmap
1745 -------------------------------------------------------------------------- */
1746
1747 static void
1748 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
1749 {
1750 nat i, j, b;
1751 StgWord bitmap;
1752
1753 b = 0;
1754
1755 for (i = 0; i < size; b++) {
1756 bitmap = large_bitmap->bitmap[b];
1757 j = stg_min(size-i, BITS_IN(W_));
1758 i += j;
1759 for (; j > 0; j--, p++) {
1760 if ((bitmap & 1) == 0) {
1761 evacuate((StgClosure **)p);
1762 }
1763 bitmap = bitmap >> 1;
1764 }
1765 }
1766 }
1767
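/* Worked example: on a 64-bit machine (BITS_IN(W_) == 64) a 70-entry large
 * bitmap is walked in two chunks -- bitmap[0] covers the first 64 words and
 * the low 6 bits of bitmap[1] cover the rest.  As with small bitmaps, a 0
 * bit means the word is a pointer and is evacuated.
 */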
1768 STATIC_INLINE StgPtr
1769 scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
1770 {
1771 while (size > 0) {
1772 if ((bitmap & 1) == 0) {
1773 evacuate((StgClosure **)p);
1774 }
1775 p++;
1776 bitmap = bitmap >> 1;
1777 size--;
1778 }
1779 return p;
1780 }
1781
1782 /* -----------------------------------------------------------------------------
1783 scavenge_stack walks over a section of stack and evacuates all the
1784 objects pointed to by it. We can use the same code for walking
1785 AP_STACK_UPDs, since these are just sections of copied stack.
1786 -------------------------------------------------------------------------- */
1787
1788 static void
1789 scavenge_stack(StgPtr p, StgPtr stack_end)
1790 {
1791 const StgRetInfoTable* info;
1792 StgWord bitmap;
1793 StgWord size;
1794
1795 /*
1796 * Each time around this loop, we are looking at a chunk of stack
1797 * that starts with an activation record.
1798 */
1799
1800 while (p < stack_end) {
1801 info = get_ret_itbl((StgClosure *)p);
1802
1803 switch (info->i.type) {
1804
1805 case UPDATE_FRAME:
1806 // In SMP, we can get update frames that point to indirections
1807 // when two threads evaluate the same thunk. We do attempt to
1808 // discover this situation in threadPaused(), but it's
1809 // possible that the following sequence occurs:
1810 //
1811 // A B
1812 // enter T
1813 // enter T
1814 // blackhole T
1815 // update T
1816 // GC
1817 //
1818 // Now T is an indirection, and the update frame is already
1819 // marked on A's stack, so we won't traverse it again in
1820 // threadPaused(). We could traverse the whole stack again
1821 // before GC, but that seems like overkill.
1822 //
1823 // Scavenging this update frame as normal would be disastrous;
1824 // the updatee would end up pointing to the value. So we
1825 // check whether the value after evacuation is a BLACKHOLE,
1826 // and if not, we change the update frame to an stg_enter
1827 // frame that simply returns the value. Hence, blackholing is
1828 // compulsory (otherwise we would have to check for thunks
1829 // too).
1830 //
1831 // Note [upd-black-hole]
1832 // One slight hiccup is that the THUNK_SELECTOR machinery can
1833 // overwrite the updatee with an IND. In parallel GC, this
1834 // could even be happening concurrently, so we can't check for
1835 // the IND. Fortunately if we assume that blackholing is
1836 // happening (either lazy or eager), then we can be sure that
1837 // the updatee is never a THUNK_SELECTOR and we're ok.
1838 // NB. this is a new invariant: blackholing is not optional.
1839 {
1840 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1841 StgClosure *v;
1842
1843 evacuate(&frame->updatee);
1844 v = frame->updatee;
1845 if (GET_CLOSURE_TAG(v) != 0 ||
1846 (get_itbl(v)->type != BLACKHOLE)) {
1847 // blackholing is compulsory, see above.
1848 frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
1849 }
1850 ASSERT(v->header.info != &stg_TSO_info);
1851 p += sizeofW(StgUpdateFrame);
1852 continue;
1853 }
1854
1855 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1856 case CATCH_STM_FRAME:
1857 case CATCH_RETRY_FRAME:
1858 case ATOMICALLY_FRAME:
1859 case UNDERFLOW_FRAME:
1860 case STOP_FRAME:
1861 case CATCH_FRAME:
1862 case RET_SMALL:
1863 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1864 size = BITMAP_SIZE(info->i.layout.bitmap);
1865 // NOTE: the payload starts immediately after the info-ptr, we
1866 // don't have an StgHeader in the same sense as a heap closure.
1867 p++;
1868 p = scavenge_small_bitmap(p, size, bitmap);
1869
1870 follow_srt:
1871 if (major_gc)
1872 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
1873 continue;
1874
1875 case RET_BCO: {
1876 StgBCO *bco;
1877 StgWord size;
1878
1879 p++;
1880 evacuate((StgClosure **)p);
1881 bco = (StgBCO *)*p;
1882 p++;
1883 size = BCO_BITMAP_SIZE(bco);
1884 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1885 p += size;
1886 continue;
1887 }
1888
1889 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1890 case RET_BIG:
1891 {
1892 StgWord size;
1893
1894 size = GET_LARGE_BITMAP(&info->i)->size;
1895 p++;
1896 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1897 p += size;
1898 // and don't forget to follow the SRT
1899 goto follow_srt;
1900 }
1901
1902 case RET_FUN:
1903 {
1904 StgRetFun *ret_fun = (StgRetFun *)p;
1905 StgFunInfoTable *fun_info;
1906
1907 evacuate(&ret_fun->fun);
1908 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1909 p = scavenge_arg_block(fun_info, ret_fun->payload);
1910 goto follow_srt;
1911 }
1912
1913 default:
1914 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1915 }
1916 }
1917 }
1918
1919 /*-----------------------------------------------------------------------------
1920 scavenge the large object list.
1921
1922   evac_gen_no is set by the caller; similar games are played with it as in
1923   scavenge_block() - see the comment at the top of scavenge_block(). Most
1924   large objects are (repeatedly) mutable, so most of the time evac_gen_no
1925   will be zero.
1926 --------------------------------------------------------------------------- */
1927
1928 static void
1929 scavenge_large (gen_workspace *ws)
1930 {
1931 bdescr *bd;
1932 StgPtr p;
1933
1934 gct->evac_gen_no = ws->gen->no;
1935
1936 bd = ws->todo_large_objects;
1937
1938 for (; bd != NULL; bd = ws->todo_large_objects) {
1939
1940 // take this object *off* the large objects list and put it on
1941 // the scavenged large objects list. This is so that we can
1942 // treat new_large_objects as a stack and push new objects on
1943 // the front when evacuating.
1944 ws->todo_large_objects = bd->link;
1945
1946 ACQUIRE_SPIN_LOCK(&ws->gen->sync);
1947 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
1948 ws->gen->n_scavenged_large_blocks += bd->blocks;
1949 RELEASE_SPIN_LOCK(&ws->gen->sync);
1950
1951 p = bd->start;
1952 if (scavenge_one(p)) {
1953 if (ws->gen->no > 0) {
1954 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
1955 }
1956 }
1957
1958 // stats
1959 gct->scanned += closure_sizeW((StgClosure*)p);
1960 }
1961 }
1962
1963 /* ----------------------------------------------------------------------------
1964 Look for work to do.
1965
1966 We look for the oldest gen that has either a todo block that can
1967 be scanned, or a block of work on the global queue that we can
1968 scan.
1969
1970    It is important to take work from the *oldest* generation that
1971 has work available, because that minimizes the likelihood of
1972 evacuating objects into a young generation when they should have
1973 been eagerly promoted. This really does make a difference (the
1974 cacheprof benchmark is one that is affected).
1975
1976 We also want to scan the todo block if possible before grabbing
1977 work from the global queue, the reason being that we don't want to
1978 steal work from the global queue and starve other threads if there
1979 is other work we can usefully be doing.
1980 ------------------------------------------------------------------------- */
1981
1982 static rtsBool
1983 scavenge_find_work (void)
1984 {
1985 int g;
1986 gen_workspace *ws;
1987 rtsBool did_something, did_anything;
1988 bdescr *bd;
1989
1990 gct->scav_find_work++;
1991
1992 did_anything = rtsFalse;
1993
1994 loop:
1995 did_something = rtsFalse;
1996 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
1997 ws = &gct->gens[g];
1998
1999 gct->scan_bd = NULL;
2000
2001 // If we have a scan block with some work to do,
2002 // scavenge everything up to the free pointer.
2003 if (ws->todo_bd->u.scan < ws->todo_free)
2004 {
2005 scavenge_block(ws->todo_bd);
2006 did_something = rtsTrue;
2007 break;
2008 }
2009
2010 // If we have any large objects to scavenge, do them now.
2011 if (ws->todo_large_objects) {
2012 scavenge_large(ws);
2013 did_something = rtsTrue;
2014 break;
2015 }
2016
2017 if ((bd = grab_local_todo_block(ws)) != NULL) {
2018 scavenge_block(bd);
2019 did_something = rtsTrue;
2020 break;
2021 }
2022 }
2023
2024 if (did_something) {
2025 did_anything = rtsTrue;
2026 goto loop;
2027 }
2028
2029 #if defined(THREADED_RTS)
2030 if (work_stealing) {
2031 // look for work to steal
2032 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2033 if ((bd = steal_todo_block(g)) != NULL) {
2034 scavenge_block(bd);
2035 did_something = rtsTrue;
2036 break;
2037 }
2038 }
2039
2040 if (did_something) {
2041 did_anything = rtsTrue;
2042 goto loop;
2043 }
2044 }
2045 #endif
2046
2047 // only return when there is no more work to do
2048
2049 return did_anything;
2050 }
2051
2052 /* ----------------------------------------------------------------------------
2053 Scavenge until we can't find anything more to scavenge.
2054 ------------------------------------------------------------------------- */
2055
2056 void
2057 scavenge_loop(void)
2058 {
2059 rtsBool work_to_do;
2060
2061 loop:
2062 work_to_do = rtsFalse;
2063
2064 // scavenge static objects
2065 if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
2066 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
2067 scavenge_static();
2068 }
2069
2070 // scavenge objects in compacted generation
2071 if (mark_stack_bd != NULL && !mark_stack_empty()) {
2072 scavenge_mark_stack();
2073 work_to_do = rtsTrue;
2074 }
2075
2076 // Order is important here: we want to deal in full blocks as
2077 // much as possible, so go for global work in preference to
2078 // local work. Only if all the global work has been exhausted
2079 // do we start scavenging the fragments of blocks in the local
2080 // workspaces.
2081 if (scavenge_find_work()) goto loop;
2082
2083 if (work_to_do) goto loop;
2084 }
2085