rts/sm/Scav.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "Storage.h"
18 #include "GC.h"
19 #include "GCThread.h"
20 #include "GCUtils.h"
21 #include "Compact.h"
22 #include "MarkStack.h"
23 #include "Evac.h"
24 #include "Scav.h"
25 #include "Apply.h"
26 #include "Trace.h"
27 #include "Sanity.h"
28 #include "Capability.h"
29 #include "LdvProfile.h"
30
31 static void scavenge_stack (StgPtr p, StgPtr stack_end);
32
33 static void scavenge_large_bitmap (StgPtr p,
34 StgLargeBitmap *large_bitmap,
35 StgWord size );
36
37 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
38 # define evacuate(a) evacuate1(a)
39 # define scavenge_loop(a) scavenge_loop1(a)
40 # define scavenge_block(a) scavenge_block1(a)
41 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
42 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
43 #endif
44
45 /* -----------------------------------------------------------------------------
46 Scavenge a TSO.
47 -------------------------------------------------------------------------- */
48
49 static void
50 scavengeTSO (StgTSO *tso)
51 {
52 rtsBool saved_eager;
53
54 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
55
56 // update the pointer from the InCall.
57 if (tso->bound != NULL) {
58 tso->bound->tso = tso;
59 }
60
61 saved_eager = gct->eager_promotion;
62 gct->eager_promotion = rtsFalse;
63
64 evacuate((StgClosure **)&tso->blocked_exceptions);
65 evacuate((StgClosure **)&tso->bq);
66
67 // scavenge the current transaction record
68 evacuate((StgClosure **)&tso->trec);
69
70 evacuate((StgClosure **)&tso->stackobj);
71
72 evacuate((StgClosure **)&tso->_link);
73 if ( tso->why_blocked == BlockedOnMVar
74 || tso->why_blocked == BlockedOnMVarRead
75 || tso->why_blocked == BlockedOnBlackHole
76 || tso->why_blocked == BlockedOnMsgThrowTo
77 || tso->why_blocked == NotBlocked
78 ) {
79 evacuate(&tso->block_info.closure);
80 }
81 #ifdef THREADED_RTS
82 // in the THREADED_RTS, block_info.closure must always point to a
83 // valid closure, because we assume this in throwTo(). In the
84 // non-threaded RTS it might be a FD (for
85 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
86 else {
87 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
88 }
89 #endif
90
91 tso->dirty = gct->failed_to_evac;
92
93 gct->eager_promotion = saved_eager;
94 }
95
96 /* -----------------------------------------------------------------------------
97 Mutable arrays of pointers
98 -------------------------------------------------------------------------- */
99
100 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
101 {
102 W_ m;
103 rtsBool any_failed;
104 StgPtr p, q;
105
106 any_failed = rtsFalse;
107 p = (StgPtr)&a->payload[0];
108 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
109 {
110 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
111 for (; p < q; p++) {
112 evacuate((StgClosure**)p);
113 }
114 if (gct->failed_to_evac) {
115 any_failed = rtsTrue;
116 *mutArrPtrsCard(a,m) = 1;
117 gct->failed_to_evac = rtsFalse;
118 } else {
119 *mutArrPtrsCard(a,m) = 0;
120 }
121 }
122
123 q = (StgPtr)&a->payload[a->ptrs];
124 if (p < q) {
125 for (; p < q; p++) {
126 evacuate((StgClosure**)p);
127 }
128 if (gct->failed_to_evac) {
129 any_failed = rtsTrue;
130 *mutArrPtrsCard(a,m) = 1;
131 gct->failed_to_evac = rtsFalse;
132 } else {
133 *mutArrPtrsCard(a,m) = 0;
134 }
135 }
136
137 gct->failed_to_evac = any_failed;
138 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
139 }
140
141 // scavenge only the marked areas of a MUT_ARR_PTRS
142 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
143 {
144 W_ m;
145 StgPtr p, q;
146 rtsBool any_failed;
147
148 any_failed = rtsFalse;
149 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
150 {
151 if (*mutArrPtrsCard(a,m) != 0) {
152 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
153 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
154 (StgPtr)&a->payload[a->ptrs]);
155 for (; p < q; p++) {
156 evacuate((StgClosure**)p);
157 }
158 if (gct->failed_to_evac) {
159 any_failed = rtsTrue;
160 gct->failed_to_evac = rtsFalse;
161 } else {
162 *mutArrPtrsCard(a,m) = 0;
163 }
164 }
165 }
166
167 gct->failed_to_evac = any_failed;
168 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
169 }
170
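/* A minimal, self-contained sketch of the card-table idea used by
 * scavenge_mut_arr_ptrs() and scavenge_mut_arr_ptrs_marked() above: the
 * payload is split into cards of (1 << CARD_BITS) elements with one mark
 * byte per card, and a card stays marked while any element in it still
 * points into a younger generation, so later minor GCs only rescan the
 * marked cards.  All names below (CARD_BITS, CardedArray, ...) are
 * invented for illustration and are not RTS API; the block is kept under
 * #if 0 so it has no effect on the build.
 */
#if 0
#define CARD_BITS 7                        /* 128 elements per card */

typedef struct {
    unsigned long  n_elems;                /* number of payload elements */
    void         **payload;                /* the element pointers       */
    unsigned char *card_mark;              /* one mark byte per card     */
} CardedArray;

/* Rescan only the marked cards, clearing a card's mark once every element
 * in it points at the same or an older generation.  points_to_younger()
 * stands in for the failed_to_evac flag consulted after evacuate(). */
static void rescan_marked_cards (CardedArray *a,
                                 int (*points_to_younger)(void *))
{
    unsigned long n_cards = (a->n_elems + (1UL << CARD_BITS) - 1) >> CARD_BITS;
    unsigned long c, i, lim;

    for (c = 0; c < n_cards; c++) {
        if (a->card_mark[c] == 0) continue;          /* clean card: skip */

        lim = (c + 1) << CARD_BITS;
        if (lim > a->n_elems) lim = a->n_elems;

        a->card_mark[c] = 0;                         /* assume clean ...  */
        for (i = c << CARD_BITS; i < lim; i++) {
            if (points_to_younger(a->payload[i])) {
                a->card_mark[c] = 1;                 /* ... unless dirty  */
            }
        }
    }
}
#endif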
171 STATIC_INLINE StgPtr
172 scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
173 {
174 while (size > 0) {
175 if ((bitmap & 1) == 0) {
176 evacuate((StgClosure **)p);
177 }
178 p++;
179 bitmap = bitmap >> 1;
180 size--;
181 }
182 return p;
183 }
184
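/* A small standalone sketch of the packed "small bitmap" convention that
 * scavenge_small_bitmap() above consumes: one machine word carries both a
 * size and the liveness bits, and a 0 bit means "this payload word is a
 * heap pointer, follow it" while a 1 bit means "not a pointer".  The
 * packing used here (size in the low 6 bits) is an assumption for
 * illustration only; the real layout is whatever BITMAP_SIZE() and
 * BITMAP_BITS() decode.  Kept under #if 0.
 */
#if 0
#define SKETCH_SIZE_BITS 6
#define SKETCH_SIZE_MASK ((1UL << SKETCH_SIZE_BITS) - 1)

/* Count how many of the payload words described by `packed' are pointers. */
static unsigned long count_pointer_words (unsigned long packed)
{
    unsigned long size   = packed &  SKETCH_SIZE_MASK;
    unsigned long bitmap = packed >> SKETCH_SIZE_BITS;
    unsigned long n      = 0;

    while (size > 0) {
        if ((bitmap & 1) == 0) {           /* 0 bit: a pointer word */
            n++;
        }
        bitmap >>= 1;
        size--;
    }
    return n;
}
#endif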
185 /* -----------------------------------------------------------------------------
186 Blocks of function args occur on the stack (at the top) and
187 in PAPs.
188 -------------------------------------------------------------------------- */
189
190 STATIC_INLINE StgPtr
191 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
192 {
193 StgPtr p;
194 StgWord bitmap;
195 StgWord size;
196
197 p = (StgPtr)args;
198 switch (fun_info->f.fun_type) {
199 case ARG_GEN:
200 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
201 size = BITMAP_SIZE(fun_info->f.b.bitmap);
202 goto small_bitmap;
203 case ARG_GEN_BIG:
204 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
205 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
206 p += size;
207 break;
208 default:
209 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
210 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
211 small_bitmap:
212 p = scavenge_small_bitmap(p, size, bitmap);
213 break;
214 }
215 return p;
216 }
217
218 STATIC_INLINE GNUC_ATTR_HOT StgPtr
219 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
220 {
221 StgPtr p;
222 StgWord bitmap;
223 StgFunInfoTable *fun_info;
224
225 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
226 ASSERT(fun_info->i.type != PAP);
227 p = (StgPtr)payload;
228
229 switch (fun_info->f.fun_type) {
230 case ARG_GEN:
231 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
232 goto small_bitmap;
233 case ARG_GEN_BIG:
234 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
235 p += size;
236 break;
237 case ARG_BCO:
238 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
239 p += size;
240 break;
241 default:
242 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
243 small_bitmap:
244 p = scavenge_small_bitmap(p, size, bitmap);
245 break;
246 }
247 return p;
248 }
249
250 STATIC_INLINE GNUC_ATTR_HOT StgPtr
251 scavenge_PAP (StgPAP *pap)
252 {
253 evacuate(&pap->fun);
254 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
255 }
256
257 STATIC_INLINE StgPtr
258 scavenge_AP (StgAP *ap)
259 {
260 evacuate(&ap->fun);
261 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
262 }
263
264 /* -----------------------------------------------------------------------------
265 Scavenge SRTs
266 -------------------------------------------------------------------------- */
267
268 /* Similar to scavenge_large_bitmap(), but we don't write back the
269 * pointers we get back from evacuate().
270 */
271 static void
272 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
273 {
274 nat i, b, size;
275 StgWord bitmap;
276 StgClosure **p;
277
278 b = 0;
279 bitmap = large_srt->l.bitmap[b];
280 size = (nat)large_srt->l.size;
281 p = (StgClosure **)large_srt->srt;
282 for (i = 0; i < size; ) {
283 if ((bitmap & 1) != 0) {
284 evacuate(p);
285 }
286 i++;
287 p++;
288 if (i % BITS_IN(W_) == 0) {
289 b++;
290 bitmap = large_srt->l.bitmap[b];
291 } else {
292 bitmap = bitmap >> 1;
293 }
294 }
295 }
296
297 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
298 * srt field in the info table. That's ok, because we'll
299 * never dereference it.
300 */
301 STATIC_INLINE GNUC_ATTR_HOT void
302 scavenge_srt (StgClosure **srt, nat srt_bitmap)
303 {
304 nat bitmap;
305 StgClosure **p;
306
307 bitmap = srt_bitmap;
308 p = srt;
309
310 if (bitmap == (StgHalfWord)(-1)) {
311 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
312 return;
313 }
314
315 while (bitmap != 0) {
316 if ((bitmap & 1) != 0) {
317 #if defined(COMPILING_WINDOWS_DLL)
318 // Special-case to handle references to closures hiding out in DLLs, since
319 // double indirections are required to get at those. The code generator knows
320 // which is which when generating the SRT, so it stores the (indirect)
321 // reference to the DLL closure in the table by first adding one to it.
322 // We check for this here, and undo the addition before evacuating it.
323 //
324 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
325 // closure that's fixed at link-time, and no extra magic is required.
326 if ( (W_)(*p) & 0x1 ) {
327 evacuate( (StgClosure**) ((W_) (*p) & ~0x1));
328 } else {
329 evacuate(p);
330 }
331 #else
332 evacuate(p);
333 #endif
334 }
335 p++;
336 bitmap = bitmap >> 1;
337 }
338 }
339
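/* A minimal sketch of the low-bit tagging trick described in the
 * COMPILING_WINDOWS_DLL branch of scavenge_srt() above: closures are
 * word-aligned, so bit 0 of an SRT entry is free, and the code generator
 * sets it to mean "this entry is one extra indirection away (e.g. a DLL
 * import slot): strip the bit and follow one more pointer".  The helper
 * below is illustrative only, not RTS API, and is kept under #if 0.
 */
#if 0
static void *follow_srt_entry (void *entry)
{
    if ((unsigned long)entry & 0x1) {
        /* Tagged: the entry holds the address of a slot that in turn
         * holds the real closure address. */
        void **slot = (void **)((unsigned long)entry & ~0x1UL);
        return *slot;
    } else {
        /* Untagged: the entry is the closure address itself. */
        return entry;
    }
}
#endif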
340
341 STATIC_INLINE GNUC_ATTR_HOT void
342 scavenge_thunk_srt(const StgInfoTable *info)
343 {
344 StgThunkInfoTable *thunk_info;
345
346 if (!major_gc) return;
347
348 thunk_info = itbl_to_thunk_itbl(info);
349 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
350 }
351
352 STATIC_INLINE GNUC_ATTR_HOT void
353 scavenge_fun_srt(const StgInfoTable *info)
354 {
355 StgFunInfoTable *fun_info;
356
357 if (!major_gc) return;
358
359 fun_info = itbl_to_fun_itbl(info);
360 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
361 }
362
363 /* -----------------------------------------------------------------------------
364 Scavenge a block from the given scan pointer up to bd->free.
365
366 evac_gen_no is set by the caller to be either zero (for a step in a
367 generation < N) or G where G is the generation of the step being
368 scavenged.
369
370 We sometimes temporarily change evac_gen_no back to zero if we're
371 scavenging a mutable object where eager promotion isn't such a good
372 idea.
373 -------------------------------------------------------------------------- */
374
375 static GNUC_ATTR_HOT void
376 scavenge_block (bdescr *bd)
377 {
378 StgPtr p, q;
379 StgInfoTable *info;
380 rtsBool saved_eager_promotion;
381 gen_workspace *ws;
382
383 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
384 bd->start, bd->gen_no, bd->u.scan);
385
386 gct->scan_bd = bd;
387 gct->evac_gen_no = bd->gen_no;
388 saved_eager_promotion = gct->eager_promotion;
389 gct->failed_to_evac = rtsFalse;
390
391 ws = &gct->gens[bd->gen->no];
392
393 p = bd->u.scan;
394
395 // we might be evacuating into the very object that we're
396 // scavenging, so we have to check the real bd->free pointer each
397 // time around the loop.
398 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
399
400 ASSERT(bd->link == NULL);
401 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
402 info = get_itbl((StgClosure *)p);
403
404 ASSERT(gct->thunk_selector_depth == 0);
405
406 q = p;
407 switch (info->type) {
408
409 case MVAR_CLEAN:
410 case MVAR_DIRTY:
411 {
412 StgMVar *mvar = ((StgMVar *)p);
413 gct->eager_promotion = rtsFalse;
414 evacuate((StgClosure **)&mvar->head);
415 evacuate((StgClosure **)&mvar->tail);
416 evacuate((StgClosure **)&mvar->value);
417 gct->eager_promotion = saved_eager_promotion;
418
419 if (gct->failed_to_evac) {
420 mvar->header.info = &stg_MVAR_DIRTY_info;
421 } else {
422 mvar->header.info = &stg_MVAR_CLEAN_info;
423 }
424 p += sizeofW(StgMVar);
425 break;
426 }
427
428 case TVAR:
429 {
430 StgTVar *tvar = ((StgTVar *)p);
431 gct->eager_promotion = rtsFalse;
432 evacuate((StgClosure **)&tvar->current_value);
433 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
434 gct->eager_promotion = saved_eager_promotion;
435
436 if (gct->failed_to_evac) {
437 tvar->header.info = &stg_TVAR_DIRTY_info;
438 } else {
439 tvar->header.info = &stg_TVAR_CLEAN_info;
440 }
441 p += sizeofW(StgTVar);
442 break;
443 }
444
445 case FUN_2_0:
446 scavenge_fun_srt(info);
447 evacuate(&((StgClosure *)p)->payload[1]);
448 evacuate(&((StgClosure *)p)->payload[0]);
449 p += sizeofW(StgHeader) + 2;
450 break;
451
452 case THUNK_2_0:
453 scavenge_thunk_srt(info);
454 evacuate(&((StgThunk *)p)->payload[1]);
455 evacuate(&((StgThunk *)p)->payload[0]);
456 p += sizeofW(StgThunk) + 2;
457 break;
458
459 case CONSTR_2_0:
460 evacuate(&((StgClosure *)p)->payload[1]);
461 evacuate(&((StgClosure *)p)->payload[0]);
462 p += sizeofW(StgHeader) + 2;
463 break;
464
465 case THUNK_1_0:
466 scavenge_thunk_srt(info);
467 evacuate(&((StgThunk *)p)->payload[0]);
468 p += sizeofW(StgThunk) + 1;
469 break;
470
471 case FUN_1_0:
472 scavenge_fun_srt(info);
473 case CONSTR_1_0:
474 evacuate(&((StgClosure *)p)->payload[0]);
475 p += sizeofW(StgHeader) + 1;
476 break;
477
478 case THUNK_0_1:
479 scavenge_thunk_srt(info);
480 p += sizeofW(StgThunk) + 1;
481 break;
482
483 case FUN_0_1:
484 scavenge_fun_srt(info);
485 case CONSTR_0_1:
486 p += sizeofW(StgHeader) + 1;
487 break;
488
489 case THUNK_0_2:
490 scavenge_thunk_srt(info);
491 p += sizeofW(StgThunk) + 2;
492 break;
493
494 case FUN_0_2:
495 scavenge_fun_srt(info);
496 case CONSTR_0_2:
497 p += sizeofW(StgHeader) + 2;
498 break;
499
500 case THUNK_1_1:
501 scavenge_thunk_srt(info);
502 evacuate(&((StgThunk *)p)->payload[0]);
503 p += sizeofW(StgThunk) + 2;
504 break;
505
506 case FUN_1_1:
507 scavenge_fun_srt(info);
508 case CONSTR_1_1:
509 evacuate(&((StgClosure *)p)->payload[0]);
510 p += sizeofW(StgHeader) + 2;
511 break;
512
513 case FUN:
514 scavenge_fun_srt(info);
515 goto gen_obj;
516
517 case THUNK:
518 {
519 StgPtr end;
520
521 scavenge_thunk_srt(info);
522 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
523 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
524 evacuate((StgClosure **)p);
525 }
526 p += info->layout.payload.nptrs;
527 break;
528 }
529
530 gen_obj:
531 case CONSTR:
532 case WEAK:
533 case PRIM:
534 {
535 StgPtr end;
536
537 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
538 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
539 evacuate((StgClosure **)p);
540 }
541 p += info->layout.payload.nptrs;
542 break;
543 }
544
545 case BCO: {
546 StgBCO *bco = (StgBCO *)p;
547 evacuate((StgClosure **)&bco->instrs);
548 evacuate((StgClosure **)&bco->literals);
549 evacuate((StgClosure **)&bco->ptrs);
550 p += bco_sizeW(bco);
551 break;
552 }
553
554 case IND_PERM:
555 case BLACKHOLE:
556 evacuate(&((StgInd *)p)->indirectee);
557 p += sizeofW(StgInd);
558 break;
559
560 case MUT_VAR_CLEAN:
561 case MUT_VAR_DIRTY:
562 gct->eager_promotion = rtsFalse;
563 evacuate(&((StgMutVar *)p)->var);
564 gct->eager_promotion = saved_eager_promotion;
565
566 if (gct->failed_to_evac) {
567 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
568 } else {
569 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
570 }
571 p += sizeofW(StgMutVar);
572 break;
573
574 case BLOCKING_QUEUE:
575 {
576 StgBlockingQueue *bq = (StgBlockingQueue *)p;
577
578 gct->eager_promotion = rtsFalse;
579 evacuate(&bq->bh);
580 evacuate((StgClosure**)&bq->owner);
581 evacuate((StgClosure**)&bq->queue);
582 evacuate((StgClosure**)&bq->link);
583 gct->eager_promotion = saved_eager_promotion;
584
585 if (gct->failed_to_evac) {
586 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
587 } else {
588 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
589 }
590 p += sizeofW(StgBlockingQueue);
591 break;
592 }
593
594 case THUNK_SELECTOR:
595 {
596 StgSelector *s = (StgSelector *)p;
597 evacuate(&s->selectee);
598 p += THUNK_SELECTOR_sizeW();
599 break;
600 }
601
602 // A chunk of stack saved in a heap object
603 case AP_STACK:
604 {
605 StgAP_STACK *ap = (StgAP_STACK *)p;
606
607 evacuate(&ap->fun);
608 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
609 p = (StgPtr)ap->payload + ap->size;
610 break;
611 }
612
613 case PAP:
614 p = scavenge_PAP((StgPAP *)p);
615 break;
616
617 case AP:
618 p = scavenge_AP((StgAP *)p);
619 break;
620
621 case ARR_WORDS:
622 // nothing to follow
623 p += arr_words_sizeW((StgArrWords *)p);
624 break;
625
626 case MUT_ARR_PTRS_CLEAN:
627 case MUT_ARR_PTRS_DIRTY:
628 {
629 // We don't eagerly promote objects pointed to by a mutable
630 // array, but if we find the array only points to objects in
631 // the same or an older generation, we mark it "clean" and
632 // avoid traversing it during minor GCs.
633 gct->eager_promotion = rtsFalse;
634
635 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
636
637 if (gct->failed_to_evac) {
638 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
639 } else {
640 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
641 }
642
643 gct->eager_promotion = saved_eager_promotion;
644 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
645 break;
646 }
647
648 case MUT_ARR_PTRS_FROZEN:
649 case MUT_ARR_PTRS_FROZEN0:
650 // follow everything
651 {
652 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
653
654 // If we're going to put this object on the mutable list, then
655 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
656 if (gct->failed_to_evac) {
657 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
658 } else {
659 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
660 }
661 break;
662 }
663
664 case SMALL_MUT_ARR_PTRS_CLEAN:
665 case SMALL_MUT_ARR_PTRS_DIRTY:
666 // follow everything
667 {
668 StgPtr next;
669
670 // We don't eagerly promote objects pointed to by a mutable
671 // array, but if we find the array only points to objects in
672 // the same or an older generation, we mark it "clean" and
673 // avoid traversing it during minor GCs.
674 gct->eager_promotion = rtsFalse;
675 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
676 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
677 evacuate((StgClosure **)p);
678 }
679 gct->eager_promotion = saved_eager_promotion;
680
681 if (gct->failed_to_evac) {
682 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
683 } else {
684 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
685 }
686
687 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
688 break;
689 }
690
691 case SMALL_MUT_ARR_PTRS_FROZEN:
692 case SMALL_MUT_ARR_PTRS_FROZEN0:
693 // follow everything
694 {
695 StgPtr next;
696
697 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
698 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
699 evacuate((StgClosure **)p);
700 }
701
702 // If we're going to put this object on the mutable list, then
703 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
704 if (gct->failed_to_evac) {
705 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
706 } else {
707 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
708 }
709 break;
710 }
711
712 case TSO:
713 {
714 scavengeTSO((StgTSO *)p);
715 p += sizeofW(StgTSO);
716 break;
717 }
718
719 case STACK:
720 {
721 StgStack *stack = (StgStack*)p;
722
723 gct->eager_promotion = rtsFalse;
724
725 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
726 stack->dirty = gct->failed_to_evac;
727 p += stack_sizeW(stack);
728
729 gct->eager_promotion = saved_eager_promotion;
730 break;
731 }
732
733 case MUT_PRIM:
734 {
735 StgPtr end;
736
737 gct->eager_promotion = rtsFalse;
738
739 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
740 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
741 evacuate((StgClosure **)p);
742 }
743 p += info->layout.payload.nptrs;
744
745 gct->eager_promotion = saved_eager_promotion;
746 gct->failed_to_evac = rtsTrue; // mutable
747 break;
748 }
749
750 case TREC_CHUNK:
751 {
752 StgWord i;
753 StgTRecChunk *tc = ((StgTRecChunk *) p);
754 TRecEntry *e = &(tc -> entries[0]);
755 gct->eager_promotion = rtsFalse;
756 evacuate((StgClosure **)&tc->prev_chunk);
757 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
758 evacuate((StgClosure **)&e->tvar);
759 evacuate((StgClosure **)&e->expected_value);
760 evacuate((StgClosure **)&e->new_value);
761 }
762 gct->eager_promotion = saved_eager_promotion;
763 gct->failed_to_evac = rtsTrue; // mutable
764 p += sizeofW(StgTRecChunk);
765 break;
766 }
767
768 default:
769 barf("scavenge: unimplemented/strange closure type %d @ %p",
770 info->type, p);
771 }
772
773 /*
774 * We need to record the current object on the mutable list if
775 * (a) It is actually mutable, or
776 * (b) It contains pointers to a younger generation.
777 * Case (b) arises if we didn't manage to promote everything that
778 * the current object points to into the current generation.
779 */
780 if (gct->failed_to_evac) {
781 gct->failed_to_evac = rtsFalse;
782 if (bd->gen_no > 0) {
783 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
784 }
785 }
786 }
787
788 if (p > bd->free) {
789 gct->copied += ws->todo_free - bd->free;
790 bd->free = p;
791 }
792
793 debugTrace(DEBUG_gc, " scavenged %ld bytes",
794 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
795
796 // update stats: this is a block that has been scavenged
797 gct->scanned += bd->free - bd->u.scan;
798 bd->u.scan = bd->free;
799
800 if (bd != ws->todo_bd) {
801 // we're not going to evac any more objects into
802 // this block, so push it now.
803 push_scanned_block(bd, ws);
804 }
805
806 gct->scan_bd = NULL;
807 }
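
/* A stripped-down sketch of the Cheney-style scan that scavenge_block()
 * above performs: objects are scanned linearly from the scan pointer up
 * to the allocation (free) pointer, and because evacuating a field may
 * copy new objects into the same block, the loop re-reads the free
 * pointer on every iteration until scan catches up with it.  The flat
 * "header word = field count" object layout and the names used here are
 * invented for illustration; kept under #if 0.
 */
#if 0
/* to_space is a flat array of words; each object is one header word
 * holding its field count, followed by that many pointer-sized fields.
 * evacuate_field() stands in for evacuate(): it may copy an object into
 * to_space, bumping *free_ix, and returns the field's new value.       */
static unsigned long evacuate_field (unsigned long field,
                                     unsigned long *to_space,
                                     unsigned long *free_ix);

static void cheney_scan (unsigned long *to_space,
                         unsigned long  scan_ix,
                         unsigned long *free_ix)
{
    while (scan_ix < *free_ix) {            /* *free_ix may keep growing */
        unsigned long n_fields = to_space[scan_ix];
        unsigned long i;

        for (i = 1; i <= n_fields; i++) {
            to_space[scan_ix + i] =
                evacuate_field(to_space[scan_ix + i], to_space, free_ix);
        }
        scan_ix += 1 + n_fields;            /* advance to the next object */
    }
}
#endif
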
808 /* -----------------------------------------------------------------------------
809 Scavenge everything on the mark stack.
810
811 This is slightly different from scavenge():
812 - we don't walk linearly through the objects, so the scavenger
813 doesn't need to advance the pointer on to the next object.
814 -------------------------------------------------------------------------- */
815
816 static void
817 scavenge_mark_stack(void)
818 {
819 StgPtr p, q;
820 StgInfoTable *info;
821 rtsBool saved_eager_promotion;
822
823 gct->evac_gen_no = oldest_gen->no;
824 saved_eager_promotion = gct->eager_promotion;
825
826 while ((p = pop_mark_stack())) {
827
828 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
829 info = get_itbl((StgClosure *)p);
830
831 q = p;
832 switch (info->type) {
833
834 case MVAR_CLEAN:
835 case MVAR_DIRTY:
836 {
837 StgMVar *mvar = ((StgMVar *)p);
838 gct->eager_promotion = rtsFalse;
839 evacuate((StgClosure **)&mvar->head);
840 evacuate((StgClosure **)&mvar->tail);
841 evacuate((StgClosure **)&mvar->value);
842 gct->eager_promotion = saved_eager_promotion;
843
844 if (gct->failed_to_evac) {
845 mvar->header.info = &stg_MVAR_DIRTY_info;
846 } else {
847 mvar->header.info = &stg_MVAR_CLEAN_info;
848 }
849 break;
850 }
851
852 case TVAR:
853 {
854 StgTVar *tvar = ((StgTVar *)p);
855 gct->eager_promotion = rtsFalse;
856 evacuate((StgClosure **)&tvar->current_value);
857 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
858 gct->eager_promotion = saved_eager_promotion;
859
860 if (gct->failed_to_evac) {
861 tvar->header.info = &stg_TVAR_DIRTY_info;
862 } else {
863 tvar->header.info = &stg_TVAR_CLEAN_info;
864 }
865 break;
866 }
867
868 case FUN_2_0:
869 scavenge_fun_srt(info);
870 evacuate(&((StgClosure *)p)->payload[1]);
871 evacuate(&((StgClosure *)p)->payload[0]);
872 break;
873
874 case THUNK_2_0:
875 scavenge_thunk_srt(info);
876 evacuate(&((StgThunk *)p)->payload[1]);
877 evacuate(&((StgThunk *)p)->payload[0]);
878 break;
879
880 case CONSTR_2_0:
881 evacuate(&((StgClosure *)p)->payload[1]);
882 evacuate(&((StgClosure *)p)->payload[0]);
883 break;
884
885 case FUN_1_0:
886 case FUN_1_1:
887 scavenge_fun_srt(info);
888 evacuate(&((StgClosure *)p)->payload[0]);
889 break;
890
891 case THUNK_1_0:
892 case THUNK_1_1:
893 scavenge_thunk_srt(info);
894 evacuate(&((StgThunk *)p)->payload[0]);
895 break;
896
897 case CONSTR_1_0:
898 case CONSTR_1_1:
899 evacuate(&((StgClosure *)p)->payload[0]);
900 break;
901
902 case FUN_0_1:
903 case FUN_0_2:
904 scavenge_fun_srt(info);
905 break;
906
907 case THUNK_0_1:
908 case THUNK_0_2:
909 scavenge_thunk_srt(info);
910 break;
911
912 case CONSTR_0_1:
913 case CONSTR_0_2:
914 break;
915
916 case FUN:
917 scavenge_fun_srt(info);
918 goto gen_obj;
919
920 case THUNK:
921 {
922 StgPtr end;
923
924 scavenge_thunk_srt(info);
925 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
926 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
927 evacuate((StgClosure **)p);
928 }
929 break;
930 }
931
932 gen_obj:
933 case CONSTR:
934 case WEAK:
935 case PRIM:
936 {
937 StgPtr end;
938
939 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
940 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
941 evacuate((StgClosure **)p);
942 }
943 break;
944 }
945
946 case BCO: {
947 StgBCO *bco = (StgBCO *)p;
948 evacuate((StgClosure **)&bco->instrs);
949 evacuate((StgClosure **)&bco->literals);
950 evacuate((StgClosure **)&bco->ptrs);
951 break;
952 }
953
954 case IND_PERM:
955 // don't need to do anything here: the only possible case
956 // is that we're in a 1-space compacting collector, with
957 // no "old" generation.
958 break;
959
960 case IND:
961 case BLACKHOLE:
962 evacuate(&((StgInd *)p)->indirectee);
963 break;
964
965 case MUT_VAR_CLEAN:
966 case MUT_VAR_DIRTY: {
967 gct->eager_promotion = rtsFalse;
968 evacuate(&((StgMutVar *)p)->var);
969 gct->eager_promotion = saved_eager_promotion;
970
971 if (gct->failed_to_evac) {
972 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
973 } else {
974 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
975 }
976 break;
977 }
978
979 case BLOCKING_QUEUE:
980 {
981 StgBlockingQueue *bq = (StgBlockingQueue *)p;
982
983 gct->eager_promotion = rtsFalse;
984 evacuate(&bq->bh);
985 evacuate((StgClosure**)&bq->owner);
986 evacuate((StgClosure**)&bq->queue);
987 evacuate((StgClosure**)&bq->link);
988 gct->eager_promotion = saved_eager_promotion;
989
990 if (gct->failed_to_evac) {
991 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
992 } else {
993 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
994 }
995 break;
996 }
997
998 case ARR_WORDS:
999 break;
1000
1001 case THUNK_SELECTOR:
1002 {
1003 StgSelector *s = (StgSelector *)p;
1004 evacuate(&s->selectee);
1005 break;
1006 }
1007
1008 // A chunk of stack saved in a heap object
1009 case AP_STACK:
1010 {
1011 StgAP_STACK *ap = (StgAP_STACK *)p;
1012
1013 evacuate(&ap->fun);
1014 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1015 break;
1016 }
1017
1018 case PAP:
1019 scavenge_PAP((StgPAP *)p);
1020 break;
1021
1022 case AP:
1023 scavenge_AP((StgAP *)p);
1024 break;
1025
1026 case MUT_ARR_PTRS_CLEAN:
1027 case MUT_ARR_PTRS_DIRTY:
1028 // follow everything
1029 {
1030 // We don't eagerly promote objects pointed to by a mutable
1031 // array, but if we find the array only points to objects in
1032 // the same or an older generation, we mark it "clean" and
1033 // avoid traversing it during minor GCs.
1034 gct->eager_promotion = rtsFalse;
1035
1036 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1037
1038 if (gct->failed_to_evac) {
1039 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1040 } else {
1041 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1042 }
1043
1044 gct->eager_promotion = saved_eager_promotion;
1045 gct->failed_to_evac = rtsTrue; // mutable anyhow.
1046 break;
1047 }
1048
1049 case MUT_ARR_PTRS_FROZEN:
1050 case MUT_ARR_PTRS_FROZEN0:
1051 // follow everything
1052 {
1053 StgPtr q = p;
1054
1055 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1056
1057 // If we're going to put this object on the mutable list, then
1058 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1059 if (gct->failed_to_evac) {
1060 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1061 } else {
1062 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1063 }
1064 break;
1065 }
1066
1067 case SMALL_MUT_ARR_PTRS_CLEAN:
1068 case SMALL_MUT_ARR_PTRS_DIRTY:
1069 // follow everything
1070 {
1071 StgPtr next;
1072 rtsBool saved_eager;
1073
1074 // We don't eagerly promote objects pointed to by a mutable
1075 // array, but if we find the array only points to objects in
1076 // the same or an older generation, we mark it "clean" and
1077 // avoid traversing it during minor GCs.
1078 saved_eager = gct->eager_promotion;
1079 gct->eager_promotion = rtsFalse;
1080 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1081 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1082 evacuate((StgClosure **)p);
1083 }
1084 gct->eager_promotion = saved_eager;
1085
1086 if (gct->failed_to_evac) {
1087 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1088 } else {
1089 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1090 }
1091
1092 gct->failed_to_evac = rtsTrue; // mutable anyhow.
1093 break;
1094 }
1095
1096 case SMALL_MUT_ARR_PTRS_FROZEN:
1097 case SMALL_MUT_ARR_PTRS_FROZEN0:
1098 // follow everything
1099 {
1100 StgPtr next, q = p;
1101
1102 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1103 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1104 evacuate((StgClosure **)p);
1105 }
1106
1107 // If we're going to put this object on the mutable list, then
1108 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1109 if (gct->failed_to_evac) {
1110 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1111 } else {
1112 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1113 }
1114 break;
1115 }
1116
1117 case TSO:
1118 {
1119 scavengeTSO((StgTSO*)p);
1120 break;
1121 }
1122
1123 case STACK:
1124 {
1125 StgStack *stack = (StgStack*)p;
1126
1127 gct->eager_promotion = rtsFalse;
1128
1129 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1130 stack->dirty = gct->failed_to_evac;
1131
1132 gct->eager_promotion = saved_eager_promotion;
1133 break;
1134 }
1135
1136 case MUT_PRIM:
1137 {
1138 StgPtr end;
1139
1140 gct->eager_promotion = rtsFalse;
1141
1142 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1143 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1144 evacuate((StgClosure **)p);
1145 }
1146
1147 gct->eager_promotion = saved_eager_promotion;
1148 gct->failed_to_evac = rtsTrue; // mutable
1149 break;
1150 }
1151
1152 case TREC_CHUNK:
1153 {
1154 StgWord i;
1155 StgTRecChunk *tc = ((StgTRecChunk *) p);
1156 TRecEntry *e = &(tc -> entries[0]);
1157 gct->eager_promotion = rtsFalse;
1158 evacuate((StgClosure **)&tc->prev_chunk);
1159 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1160 evacuate((StgClosure **)&e->tvar);
1161 evacuate((StgClosure **)&e->expected_value);
1162 evacuate((StgClosure **)&e->new_value);
1163 }
1164 gct->eager_promotion = saved_eager_promotion;
1165 gct->failed_to_evac = rtsTrue; // mutable
1166 break;
1167 }
1168
1169 default:
1170 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1171 info->type, p);
1172 }
1173
1174 if (gct->failed_to_evac) {
1175 gct->failed_to_evac = rtsFalse;
1176 if (gct->evac_gen_no) {
1177 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1178 }
1179 }
1180 } // while (p = pop_mark_stack())
1181 }
1182
1183 /* -----------------------------------------------------------------------------
1184 Scavenge one object.
1185
1186 This is used for objects that are temporarily marked as mutable
1187 because they contain old-to-new generation pointers. Only certain
1188 objects can have this property.
1189 -------------------------------------------------------------------------- */
1190
1191 static rtsBool
1192 scavenge_one(StgPtr p)
1193 {
1194 const StgInfoTable *info;
1195 rtsBool no_luck;
1196 rtsBool saved_eager_promotion;
1197
1198 saved_eager_promotion = gct->eager_promotion;
1199
1200 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1201 info = get_itbl((StgClosure *)p);
1202
1203 switch (info->type) {
1204
1205 case MVAR_CLEAN:
1206 case MVAR_DIRTY:
1207 {
1208 StgMVar *mvar = ((StgMVar *)p);
1209 gct->eager_promotion = rtsFalse;
1210 evacuate((StgClosure **)&mvar->head);
1211 evacuate((StgClosure **)&mvar->tail);
1212 evacuate((StgClosure **)&mvar->value);
1213 gct->eager_promotion = saved_eager_promotion;
1214
1215 if (gct->failed_to_evac) {
1216 mvar->header.info = &stg_MVAR_DIRTY_info;
1217 } else {
1218 mvar->header.info = &stg_MVAR_CLEAN_info;
1219 }
1220 break;
1221 }
1222
1223 case TVAR:
1224 {
1225 StgTVar *tvar = ((StgTVar *)p);
1226 gct->eager_promotion = rtsFalse;
1227 evacuate((StgClosure **)&tvar->current_value);
1228 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1229 gct->eager_promotion = saved_eager_promotion;
1230
1231 if (gct->failed_to_evac) {
1232 tvar->header.info = &stg_TVAR_DIRTY_info;
1233 } else {
1234 tvar->header.info = &stg_TVAR_CLEAN_info;
1235 }
1236 break;
1237 }
1238
1239 case THUNK:
1240 case THUNK_1_0:
1241 case THUNK_0_1:
1242 case THUNK_1_1:
1243 case THUNK_0_2:
1244 case THUNK_2_0:
1245 {
1246 StgPtr q, end;
1247
1248 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1249 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1250 evacuate((StgClosure **)q);
1251 }
1252 break;
1253 }
1254
1255 case FUN:
1256 case FUN_1_0: // hardly worth specialising these guys
1257 case FUN_0_1:
1258 case FUN_1_1:
1259 case FUN_0_2:
1260 case FUN_2_0:
1261 case CONSTR:
1262 case CONSTR_1_0:
1263 case CONSTR_0_1:
1264 case CONSTR_1_1:
1265 case CONSTR_0_2:
1266 case CONSTR_2_0:
1267 case WEAK:
1268 case PRIM:
1269 case IND_PERM:
1270 {
1271 StgPtr q, end;
1272
1273 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1274 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1275 evacuate((StgClosure **)q);
1276 }
1277 break;
1278 }
1279
1280 case MUT_VAR_CLEAN:
1281 case MUT_VAR_DIRTY: {
1282 StgPtr q = p;
1283
1284 gct->eager_promotion = rtsFalse;
1285 evacuate(&((StgMutVar *)p)->var);
1286 gct->eager_promotion = saved_eager_promotion;
1287
1288 if (gct->failed_to_evac) {
1289 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1290 } else {
1291 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1292 }
1293 break;
1294 }
1295
1296 case BLOCKING_QUEUE:
1297 {
1298 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1299
1300 gct->eager_promotion = rtsFalse;
1301 evacuate(&bq->bh);
1302 evacuate((StgClosure**)&bq->owner);
1303 evacuate((StgClosure**)&bq->queue);
1304 evacuate((StgClosure**)&bq->link);
1305 gct->eager_promotion = saved_eager_promotion;
1306
1307 if (gct->failed_to_evac) {
1308 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1309 } else {
1310 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1311 }
1312 break;
1313 }
1314
1315 case THUNK_SELECTOR:
1316 {
1317 StgSelector *s = (StgSelector *)p;
1318 evacuate(&s->selectee);
1319 break;
1320 }
1321
1322 case AP_STACK:
1323 {
1324 StgAP_STACK *ap = (StgAP_STACK *)p;
1325
1326 evacuate(&ap->fun);
1327 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1328 p = (StgPtr)ap->payload + ap->size;
1329 break;
1330 }
1331
1332 case PAP:
1333 p = scavenge_PAP((StgPAP *)p);
1334 break;
1335
1336 case AP:
1337 p = scavenge_AP((StgAP *)p);
1338 break;
1339
1340 case ARR_WORDS:
1341 // nothing to follow
1342 break;
1343
1344 case MUT_ARR_PTRS_CLEAN:
1345 case MUT_ARR_PTRS_DIRTY:
1346 {
1347 // We don't eagerly promote objects pointed to by a mutable
1348 // array, but if we find the array only points to objects in
1349 // the same or an older generation, we mark it "clean" and
1350 // avoid traversing it during minor GCs.
1351 gct->eager_promotion = rtsFalse;
1352
1353 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1354
1355 if (gct->failed_to_evac) {
1356 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1357 } else {
1358 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1359 }
1360
1361 gct->eager_promotion = saved_eager_promotion;
1362 gct->failed_to_evac = rtsTrue;
1363 break;
1364 }
1365
1366 case MUT_ARR_PTRS_FROZEN:
1367 case MUT_ARR_PTRS_FROZEN0:
1368 {
1369 // follow everything
1370 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1371
1372 // If we're going to put this object on the mutable list, then
1373 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1374 if (gct->failed_to_evac) {
1375 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1376 } else {
1377 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1378 }
1379 break;
1380 }
1381
1382 case SMALL_MUT_ARR_PTRS_CLEAN:
1383 case SMALL_MUT_ARR_PTRS_DIRTY:
1384 {
1385 StgPtr next, q;
1386 rtsBool saved_eager;
1387
1388 // We don't eagerly promote objects pointed to by a mutable
1389 // array, but if we find the array only points to objects in
1390 // the same or an older generation, we mark it "clean" and
1391 // avoid traversing it during minor GCs.
1392 saved_eager = gct->eager_promotion;
1393 gct->eager_promotion = rtsFalse;
1394 q = p;
1395 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1396 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1397 evacuate((StgClosure **)p);
1398 }
1399 gct->eager_promotion = saved_eager;
1400
1401 if (gct->failed_to_evac) {
1402 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1403 } else {
1404 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1405 }
1406
1407 gct->failed_to_evac = rtsTrue;
1408 break;
1409 }
1410
1411 case SMALL_MUT_ARR_PTRS_FROZEN:
1412 case SMALL_MUT_ARR_PTRS_FROZEN0:
1413 {
1414 // follow everything
1415 StgPtr next, q=p;
1416
1417 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1418 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1419 evacuate((StgClosure **)p);
1420 }
1421
1422 // If we're going to put this object on the mutable list, then
1423 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1424 if (gct->failed_to_evac) {
1425 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1426 } else {
1427 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1428 }
1429 break;
1430 }
1431
1432 case TSO:
1433 {
1434 scavengeTSO((StgTSO*)p);
1435 break;
1436 }
1437
1438 case STACK:
1439 {
1440 StgStack *stack = (StgStack*)p;
1441
1442 gct->eager_promotion = rtsFalse;
1443
1444 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1445 stack->dirty = gct->failed_to_evac;
1446
1447 gct->eager_promotion = saved_eager_promotion;
1448 break;
1449 }
1450
1451 case MUT_PRIM:
1452 {
1453 StgPtr end;
1454
1455 gct->eager_promotion = rtsFalse;
1456
1457 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1458 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1459 evacuate((StgClosure **)p);
1460 }
1461
1462 gct->eager_promotion = saved_eager_promotion;
1463 gct->failed_to_evac = rtsTrue; // mutable
1464 break;
1465
1466 }
1467
1468 case TREC_CHUNK:
1469 {
1470 StgWord i;
1471 StgTRecChunk *tc = ((StgTRecChunk *) p);
1472 TRecEntry *e = &(tc -> entries[0]);
1473 gct->eager_promotion = rtsFalse;
1474 evacuate((StgClosure **)&tc->prev_chunk);
1475 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1476 evacuate((StgClosure **)&e->tvar);
1477 evacuate((StgClosure **)&e->expected_value);
1478 evacuate((StgClosure **)&e->new_value);
1479 }
1480 gct->eager_promotion = saved_eager_promotion;
1481 gct->failed_to_evac = rtsTrue; // mutable
1482 break;
1483 }
1484
1485 case IND:
1486 // IND can happen, for example, when the interpreter allocates
1487 // a gigantic AP closure (more than one block), which ends up
1488 // on the large-object list and then gets updated. See #3424.
1489 case BLACKHOLE:
1490 case IND_STATIC:
1491 evacuate(&((StgInd *)p)->indirectee);
1492
1493 #if 0 && defined(DEBUG)
1494 if (RtsFlags.DebugFlags.gc)
1495 /* Debugging code to print out the size of the thing we just
1496 * promoted
1497 */
1498 {
1499 StgPtr start = gen->scan;
1500 bdescr *start_bd = gen->scan_bd;
1501 StgWord size = 0;
1502 scavenge(&gen);
1503 if (start_bd != gen->scan_bd) {
1504 size += (P_)BLOCK_ROUND_UP(start) - start;
1505 start_bd = start_bd->link;
1506 while (start_bd != gen->scan_bd) {
1507 size += BLOCK_SIZE_W;
1508 start_bd = start_bd->link;
1509 }
1510 size += gen->scan -
1511 (P_)BLOCK_ROUND_DOWN(gen->scan);
1512 } else {
1513 size = gen->scan - start;
1514 }
1515 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1516 }
1517 #endif
1518 break;
1519
1520 default:
1521 barf("scavenge_one: strange object %d", (int)(info->type));
1522 }
1523
1524 no_luck = gct->failed_to_evac;
1525 gct->failed_to_evac = rtsFalse;
1526 return (no_luck);
1527 }
1528
1529 /* -----------------------------------------------------------------------------
1530 Scavenging mutable lists.
1531
1532 We treat the mutable list of each generation > N (i.e. all the
1533 generations older than the one being collected) as roots. We also
1534 remove non-mutable objects from the mutable list at this point.
1535 -------------------------------------------------------------------------- */
1536
1537 void
1538 scavenge_mutable_list(bdescr *bd, generation *gen)
1539 {
1540 StgPtr p, q;
1541 nat gen_no;
1542
1543 gen_no = gen->no;
1544 gct->evac_gen_no = gen_no;
1545 for (; bd != NULL; bd = bd->link) {
1546 for (q = bd->start; q < bd->free; q++) {
1547 p = (StgPtr)*q;
1548 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1549
1550 #ifdef DEBUG
1551 switch (get_itbl((StgClosure *)p)->type) {
1552 case MUT_VAR_CLEAN:
1553 // can happen due to concurrent writeMutVars
1554 case MUT_VAR_DIRTY:
1555 mutlist_MUTVARS++; break;
1556 case MUT_ARR_PTRS_CLEAN:
1557 case MUT_ARR_PTRS_DIRTY:
1558 case MUT_ARR_PTRS_FROZEN:
1559 case MUT_ARR_PTRS_FROZEN0:
1560 mutlist_MUTARRS++; break;
1561 case MVAR_CLEAN:
1562 barf("MVAR_CLEAN on mutable list");
1563 case MVAR_DIRTY:
1564 mutlist_MVARS++; break;
1565 case TVAR:
1566 mutlist_TVAR++; break;
1567 case TREC_CHUNK:
1568 mutlist_TREC_CHUNK++; break;
1569 case MUT_PRIM:
1570 if (((StgClosure*)p)->header.info == &stg_TVAR_WATCH_QUEUE_info)
1571 mutlist_TVAR_WATCH_QUEUE++;
1572 else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
1573 mutlist_TREC_HEADER++;
1574 else if (((StgClosure*)p)->header.info == &stg_ATOMIC_INVARIANT_info)
1575 mutlist_ATOMIC_INVARIANT++;
1576 else if (((StgClosure*)p)->header.info == &stg_INVARIANT_CHECK_QUEUE_info)
1577 mutlist_INVARIANT_CHECK_QUEUE++;
1578 else
1579 mutlist_OTHERS++;
1580 break;
1581 default:
1582 mutlist_OTHERS++; break;
1583 }
1584 #endif
1585
1586 // Check whether this object is "clean", that is, it
1587 // definitely doesn't point into a young generation.
1588 // Clean objects don't need to be scavenged. Some clean
1589 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1590 // list at all; others, such as MUT_ARR_PTRS
1591 // are always on the mutable list.
1592 //
1593 switch (get_itbl((StgClosure *)p)->type) {
1594 case MUT_ARR_PTRS_CLEAN:
1595 recordMutableGen_GC((StgClosure *)p,gen_no);
1596 continue;
1597 case MUT_ARR_PTRS_DIRTY:
1598 {
1599 rtsBool saved_eager_promotion;
1600 saved_eager_promotion = gct->eager_promotion;
1601 gct->eager_promotion = rtsFalse;
1602
1603 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1604
1605 if (gct->failed_to_evac) {
1606 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1607 } else {
1608 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1609 }
1610
1611 gct->eager_promotion = saved_eager_promotion;
1612 gct->failed_to_evac = rtsFalse;
1613 recordMutableGen_GC((StgClosure *)p,gen_no);
1614 continue;
1615 }
1616 default:
1617 ;
1618 }
1619
1620 if (scavenge_one(p)) {
1621 // didn't manage to promote everything, so put the
1622 // object back on the list.
1623 recordMutableGen_GC((StgClosure *)p,gen_no);
1624 }
1625 }
1626 }
1627 }
1628
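/* A compact sketch of the remembered-set discipline that
 * scavenge_mutable_list() above implements: old-generation objects that
 * might point into a younger generation live on their generation's
 * mutable list; each minor GC scavenges every entry and keeps it on the
 * list only if it still points at something young (the role played by
 * scavenge_one() returning rtsTrue).  The list node type and callback
 * are invented for illustration; node recycling is elided.  Under #if 0.
 */
#if 0
typedef struct MutListEntry_ {
    void                 *obj;
    struct MutListEntry_ *next;
} MutListEntry;

/* Scavenge every entry and rebuild the list from just the entries that
 * still need watching; returns the new head of the mutable list. */
static MutListEntry *scavenge_mut_list (MutListEntry *head,
                                        int (*still_points_young)(void *))
{
    MutListEntry *kept = NULL, *next;

    while (head != NULL) {
        next = head->next;
        if (still_points_young(head->obj)) {
            head->next = kept;              /* keep: still has young refs */
            kept = head;
        }
        /* otherwise drop the entry: the object is clean for now */
        head = next;
    }
    return kept;
}
#endif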
1629 void
1630 scavenge_capability_mut_lists (Capability *cap)
1631 {
1632 nat g;
1633
1634 /* Mutable lists from each generation > N
1635 * we want to *scavenge* these roots, not evacuate them: they're not
1636 * going to move in this GC.
1637 * Also do them in reverse generation order, for the usual reason:
1638 * namely to reduce the likelihood of spurious old->new pointers.
1639 */
1640 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1641 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1642 freeChain_sync(cap->saved_mut_lists[g]);
1643 cap->saved_mut_lists[g] = NULL;
1644 }
1645 }
1646
1647 /* -----------------------------------------------------------------------------
1648 Scavenging the static objects.
1649
1650 Static objects that have been evacuated are chained on gct->static_objects;
1651 we scavenge each one in turn and move it onto the scavenged_static_objects
1652 list. Static objects are always evacuated straight to the oldest generation.
1653 -------------------------------------------------------------------------- */
1654
1655 static void
1656 scavenge_static(void)
1657 {
1658 StgClosure* p;
1659 const StgInfoTable *info;
1660
1661 debugTrace(DEBUG_gc, "scavenging static objects");
1662
1663 /* Always evacuate straight to the oldest generation for static
1664 * objects */
1665 gct->evac_gen_no = oldest_gen->no;
1666
1667 /* keep going until we've scavenged all the objects on the linked
1668 list... */
1669
1670 while (1) {
1671
1672 /* get the next static object from the list. Remember, there might
1673 * be more stuff on this list after each evacuation...
1674 * (static_objects is a global)
1675 */
1676 p = gct->static_objects;
1677 if (p == END_OF_STATIC_LIST) {
1678 break;
1679 }
1680
1681 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1682 info = get_itbl(p);
1683 /*
1684 if (info->type==RBH)
1685 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1686 */
1687 // make sure the info pointer is into text space
1688
1689 /* Take this object *off* the static_objects list,
1690 * and put it on the scavenged_static_objects list.
1691 */
1692 gct->static_objects = *STATIC_LINK(info,p);
1693 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1694 gct->scavenged_static_objects = p;
1695
1696 switch (info -> type) {
1697
1698 case IND_STATIC:
1699 {
1700 StgInd *ind = (StgInd *)p;
1701 evacuate(&ind->indirectee);
1702
1703 /* might fail to evacuate it, in which case we have to pop it
1704 * back on the mutable list of the oldest generation. We
1705 * leave it *on* the scavenged_static_objects list, though,
1706 * in case we visit this object again.
1707 */
1708 if (gct->failed_to_evac) {
1709 gct->failed_to_evac = rtsFalse;
1710 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1711 }
1712 break;
1713 }
1714
1715 case THUNK_STATIC:
1716 scavenge_thunk_srt(info);
1717 break;
1718
1719 case FUN_STATIC:
1720 scavenge_fun_srt(info);
1721 break;
1722
1723 case CONSTR_STATIC:
1724 {
1725 StgPtr q, next;
1726
1727 next = (P_)p->payload + info->layout.payload.ptrs;
1728 // evacuate the pointers
1729 for (q = (P_)p->payload; q < next; q++) {
1730 evacuate((StgClosure **)q);
1731 }
1732 break;
1733 }
1734
1735 default:
1736 barf("scavenge_static: strange closure %d", (int)(info->type));
1737 }
1738
1739 ASSERT(gct->failed_to_evac == rtsFalse);
1740 }
1741 }
1742
1743 /* -----------------------------------------------------------------------------
1744 scavenge a chunk of memory described by a bitmap
1745 -------------------------------------------------------------------------- */
1746
1747 static void
1748 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
1749 {
1750 nat i, j, b;
1751 StgWord bitmap;
1752
1753 b = 0;
1754
1755 for (i = 0; i < size; b++) {
1756 bitmap = large_bitmap->bitmap[b];
1757 j = stg_min(size-i, BITS_IN(W_));
1758 i += j;
1759 for (; j > 0; j--, p++) {
1760 if ((bitmap & 1) == 0) {
1761 evacuate((StgClosure **)p);
1762 }
1763 bitmap = bitmap >> 1;
1764 }
1765 }
1766 }
1767
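/* A short sketch of walking a bitmap that spans several machine words,
 * as scavenge_large_bitmap() above does: take min(remaining, word-size)
 * bits from each bitmap word in turn, where a 0 bit again marks a
 * pointer slot.  The word size and names are illustrative; kept under
 * #if 0.
 */
#if 0
#define SKETCH_WORD_BITS (8 * sizeof(unsigned long))

/* Count the pointer slots among the first `size' slots described by a
 * multi-word bitmap. */
static unsigned long count_ptrs_large (const unsigned long *bitmap_words,
                                       unsigned long size)
{
    unsigned long i = 0, b = 0, n = 0;

    while (i < size) {
        unsigned long bits = bitmap_words[b++];
        unsigned long j    = size - i;
        if (j > SKETCH_WORD_BITS) j = SKETCH_WORD_BITS;
        i += j;
        for (; j > 0; j--) {
            if ((bits & 1) == 0) n++;        /* 0 bit: a pointer slot */
            bits >>= 1;
        }
    }
    return n;
}
#endif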
1768
1769 /* -----------------------------------------------------------------------------
1770 scavenge_stack walks over a section of stack and evacuates all the
1771 objects pointed to by it. We can use the same code for walking
1772 AP_STACK_UPDs, since these are just sections of copied stack.
1773 -------------------------------------------------------------------------- */
1774
1775 static void
1776 scavenge_stack(StgPtr p, StgPtr stack_end)
1777 {
1778 const StgRetInfoTable* info;
1779 StgWord bitmap;
1780 StgWord size;
1781
1782 /*
1783 * Each time around this loop, we are looking at a chunk of stack
1784 * that starts with an activation record.
1785 */
1786
1787 while (p < stack_end) {
1788 info = get_ret_itbl((StgClosure *)p);
1789
1790 switch (info->i.type) {
1791
1792 case UPDATE_FRAME:
1793 // In SMP, we can get update frames that point to indirections
1794 // when two threads evaluate the same thunk. We do attempt to
1795 // discover this situation in threadPaused(), but it's
1796 // possible that the following sequence occurs:
1797 //
1798 // A B
1799 // enter T
1800 // enter T
1801 // blackhole T
1802 // update T
1803 // GC
1804 //
1805 // Now T is an indirection, and the update frame is already
1806 // marked on A's stack, so we won't traverse it again in
1807 // threadPaused(). We could traverse the whole stack again
1808 // before GC, but that seems like overkill.
1809 //
1810 // Scavenging this update frame as normal would be disastrous;
1811 // the updatee would end up pointing to the value. So we
1812 // check whether the value after evacuation is a BLACKHOLE,
1813 // and if not, we change the update frame to an stg_enter
1814 // frame that simply returns the value. Hence, blackholing is
1815 // compulsory (otherwise we would have to check for thunks
1816 // too).
1817 //
1818 // Note [upd-black-hole]
1819 // One slight hiccup is that the THUNK_SELECTOR machinery can
1820 // overwrite the updatee with an IND. In parallel GC, this
1821 // could even be happening concurrently, so we can't check for
1822 // the IND. Fortunately if we assume that blackholing is
1823 // happening (either lazy or eager), then we can be sure that
1824 // the updatee is never a THUNK_SELECTOR and we're ok.
1825 // NB. this is a new invariant: blackholing is not optional.
1826 {
1827 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1828 StgClosure *v;
1829
1830 evacuate(&frame->updatee);
1831 v = frame->updatee;
1832 if (GET_CLOSURE_TAG(v) != 0 ||
1833 (get_itbl(v)->type != BLACKHOLE)) {
1834 // blackholing is compulsory, see above.
1835 frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
1836 }
1837 ASSERT(v->header.info != &stg_TSO_info);
1838 p += sizeofW(StgUpdateFrame);
1839 continue;
1840 }
1841
1842 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1843 case CATCH_STM_FRAME:
1844 case CATCH_RETRY_FRAME:
1845 case ATOMICALLY_FRAME:
1846 case UNDERFLOW_FRAME:
1847 case STOP_FRAME:
1848 case CATCH_FRAME:
1849 case RET_SMALL:
1850 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1851 size = BITMAP_SIZE(info->i.layout.bitmap);
1852 // NOTE: the payload starts immediately after the info-ptr, we
1853 // don't have an StgHeader in the same sense as a heap closure.
1854 p++;
1855 p = scavenge_small_bitmap(p, size, bitmap);
1856
1857 follow_srt:
1858 if (major_gc)
1859 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
1860 continue;
1861
1862 case RET_BCO: {
1863 StgBCO *bco;
1864 StgWord size;
1865
1866 p++;
1867 evacuate((StgClosure **)p);
1868 bco = (StgBCO *)*p;
1869 p++;
1870 size = BCO_BITMAP_SIZE(bco);
1871 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1872 p += size;
1873 continue;
1874 }
1875
1876 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1877 case RET_BIG:
1878 {
1879 StgWord size;
1880
1881 size = GET_LARGE_BITMAP(&info->i)->size;
1882 p++;
1883 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1884 p += size;
1885 // and don't forget to follow the SRT
1886 goto follow_srt;
1887 }
1888
1889 case RET_FUN:
1890 {
1891 StgRetFun *ret_fun = (StgRetFun *)p;
1892 StgFunInfoTable *fun_info;
1893
1894 evacuate(&ret_fun->fun);
1895 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1896 p = scavenge_arg_block(fun_info, ret_fun->payload);
1897 goto follow_srt;
1898 }
1899
1900 default:
1901 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1902 }
1903 }
1904 }
1905
1906 /*-----------------------------------------------------------------------------
1907 scavenge the large object list.
1908
1909 evac_gen_no is set by the caller; similar games are played with evac_gen_no
1910 as in scavenge_block() - see the comment at the top of scavenge_block(). Most
1911 large objects are (repeatedly) mutable, so most of the time evac_gen_no will
1912 be zero.
1913 --------------------------------------------------------------------------- */
1914
1915 static void
1916 scavenge_large (gen_workspace *ws)
1917 {
1918 bdescr *bd;
1919 StgPtr p;
1920
1921 gct->evac_gen_no = ws->gen->no;
1922
1923 bd = ws->todo_large_objects;
1924
1925 for (; bd != NULL; bd = ws->todo_large_objects) {
1926
1927 // take this object *off* the large objects list and put it on
1928 // the scavenged large objects list. This is so that we can
1929 // treat new_large_objects as a stack and push new objects on
1930 // the front when evacuating.
1931 ws->todo_large_objects = bd->link;
1932
1933 ACQUIRE_SPIN_LOCK(&ws->gen->sync);
1934 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
1935 ws->gen->n_scavenged_large_blocks += bd->blocks;
1936 RELEASE_SPIN_LOCK(&ws->gen->sync);
1937
1938 p = bd->start;
1939 if (scavenge_one(p)) {
1940 if (ws->gen->no > 0) {
1941 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
1942 }
1943 }
1944
1945 // stats
1946 gct->scanned += closure_sizeW((StgClosure*)p);
1947 }
1948 }
1949
1950 /* ----------------------------------------------------------------------------
1951 Look for work to do.
1952
1953 We look for the oldest gen that has either a todo block that can
1954 be scanned, or a block of work on the global queue that we can
1955 scan.
1956
1957 It is important to take work from the *oldest* generation that
1958 has work available, because that minimizes the likelihood of
1959 evacuating objects into a young generation when they should have
1960 been eagerly promoted. This really does make a difference (the
1961 cacheprof benchmark is one that is affected).
1962
1963 We also want to scan the todo block if possible before grabbing
1964 work from the global queue, the reason being that we don't want to
1965 steal work from the global queue and starve other threads if there
1966 is other work we can usefully be doing.
1967 ------------------------------------------------------------------------- */
1968
1969 static rtsBool
1970 scavenge_find_work (void)
1971 {
1972 int g;
1973 gen_workspace *ws;
1974 rtsBool did_something, did_anything;
1975 bdescr *bd;
1976
1977 gct->scav_find_work++;
1978
1979 did_anything = rtsFalse;
1980
1981 loop:
1982 did_something = rtsFalse;
1983 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
1984 ws = &gct->gens[g];
1985
1986 gct->scan_bd = NULL;
1987
1988 // If we have a scan block with some work to do,
1989 // scavenge everything up to the free pointer.
1990 if (ws->todo_bd->u.scan < ws->todo_free)
1991 {
1992 scavenge_block(ws->todo_bd);
1993 did_something = rtsTrue;
1994 break;
1995 }
1996
1997 // If we have any large objects to scavenge, do them now.
1998 if (ws->todo_large_objects) {
1999 scavenge_large(ws);
2000 did_something = rtsTrue;
2001 break;
2002 }
2003
2004 if ((bd = grab_local_todo_block(ws)) != NULL) {
2005 scavenge_block(bd);
2006 did_something = rtsTrue;
2007 break;
2008 }
2009 }
2010
2011 if (did_something) {
2012 did_anything = rtsTrue;
2013 goto loop;
2014 }
2015
2016 #if defined(THREADED_RTS)
2017 if (work_stealing) {
2018 // look for work to steal
2019 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2020 if ((bd = steal_todo_block(g)) != NULL) {
2021 scavenge_block(bd);
2022 did_something = rtsTrue;
2023 break;
2024 }
2025 }
2026
2027 if (did_something) {
2028 did_anything = rtsTrue;
2029 goto loop;
2030 }
2031 }
2032 #endif
2033
2034 // only return when there is no more work to do
2035
2036 return did_anything;
2037 }
2038
2039 /* ----------------------------------------------------------------------------
2040 Scavenge until we can't find anything more to scavenge.
2041 ------------------------------------------------------------------------- */
2042
2043 void
2044 scavenge_loop(void)
2045 {
2046 rtsBool work_to_do;
2047
2048 loop:
2049 work_to_do = rtsFalse;
2050
2051 // scavenge static objects
2052 if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
2053 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
2054 scavenge_static();
2055 }
2056
2057 // scavenge objects in compacted generation
2058 if (mark_stack_bd != NULL && !mark_stack_empty()) {
2059 scavenge_mark_stack();
2060 work_to_do = rtsTrue;
2061 }
2062
2063 // Order is important here: we want to deal in full blocks as
2064 // much as possible, so go for global work in preference to
2065 // local work. Only if all the global work has been exhausted
2066 // do we start scavenging the fragments of blocks in the local
2067 // workspaces.
2068 if (scavenge_find_work()) goto loop;
2069
2070 if (work_to_do) goto loop;
2071 }
2072