1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "Storage.h"
18 #include "GC.h"
19 #include "GCThread.h"
20 #include "GCUtils.h"
21 #include "Compact.h"
22 #include "MarkStack.h"
23 #include "Evac.h"
24 #include "Scav.h"
25 #include "Apply.h"
26 #include "Trace.h"
27 #include "Sanity.h"
28 #include "Capability.h"
29 #include "LdvProfile.h"
30
31 static void scavenge_stack (StgPtr p, StgPtr stack_end);
32
33 static void scavenge_large_bitmap (StgPtr p,
34 StgLargeBitmap *large_bitmap,
35 StgWord size );
36
37 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
38 # define evacuate(a) evacuate1(a)
39 # define scavenge_loop(a) scavenge_loop1(a)
40 # define scavenge_block(a) scavenge_block1(a)
41 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
42 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
43 #endif
44
45 /* -----------------------------------------------------------------------------
46 Scavenge a TSO.
47 -------------------------------------------------------------------------- */
48
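// scavengeTSO evacuates every pointer field of a TSO: the bound
// InCall's back-pointer, blocked_exceptions, bq, trec, stackobj,
// _link and (when the thread is blocked on a closure) block_info.
// Eager promotion is switched off for the duration, and tso->dirty
// records whether any field still points into a younger generation.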
49 static void
50 scavengeTSO (StgTSO *tso)
51 {
52 rtsBool saved_eager;
53
54 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
55
56 // update the pointer from the InCall.
57 if (tso->bound != NULL) {
58 // NB. We can't just set tso->bound->tso = tso, because this
59 // might be an invalid copy of the TSO resulting from multiple
60 // threads evacuating the TSO simultaneously (see
61 // Evac.c:copy_tag()). Calling evacuate() on this pointer
62 // will ensure that we update it to point to the correct copy.
63 evacuate((StgClosure **)&tso->bound->tso);
64 }
65
66 saved_eager = gct->eager_promotion;
67 gct->eager_promotion = rtsFalse;
68
69 evacuate((StgClosure **)&tso->blocked_exceptions);
70 evacuate((StgClosure **)&tso->bq);
71
72 // scavenge the current transaction record
73 evacuate((StgClosure **)&tso->trec);
74
75 evacuate((StgClosure **)&tso->stackobj);
76
77 evacuate((StgClosure **)&tso->_link);
78 if ( tso->why_blocked == BlockedOnMVar
79 || tso->why_blocked == BlockedOnMVarRead
80 || tso->why_blocked == BlockedOnBlackHole
81 || tso->why_blocked == BlockedOnMsgThrowTo
82 || tso->why_blocked == NotBlocked
83 ) {
84 evacuate(&tso->block_info.closure);
85 }
86 #ifdef THREADED_RTS
87 // in the THREADED_RTS, block_info.closure must always point to a
88 // valid closure, because we assume this in throwTo(). In the
89 // non-threaded RTS it might be a FD (for
90 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
91 else {
92 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
93 }
94 #endif
95
96 tso->dirty = gct->failed_to_evac;
97
98 gct->eager_promotion = saved_eager;
99 }
100
101 /* -----------------------------------------------------------------------------
102 Mutable arrays of pointers
103 -------------------------------------------------------------------------- */
104
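// scavenge_mut_arr_ptrs evacuates the whole payload of a MUT_ARR_PTRS,
// one card at a time (a card covers 2^MUT_ARR_PTRS_CARD_BITS elements).
// A card's mark is set to 1 when evacuating its elements left a pointer
// into a younger generation (failed_to_evac), and cleared to 0
// otherwise; scavenge_mut_arr_ptrs_marked below then revisits only the
// cards that are still marked.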
105 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
106 {
107 W_ m;
108 rtsBool any_failed;
109 StgPtr p, q;
110
111 any_failed = rtsFalse;
112 p = (StgPtr)&a->payload[0];
113 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
114 {
115 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
116 for (; p < q; p++) {
117 evacuate((StgClosure**)p);
118 }
119 if (gct->failed_to_evac) {
120 any_failed = rtsTrue;
121 *mutArrPtrsCard(a,m) = 1;
122 gct->failed_to_evac = rtsFalse;
123 } else {
124 *mutArrPtrsCard(a,m) = 0;
125 }
126 }
127
128 q = (StgPtr)&a->payload[a->ptrs];
129 if (p < q) {
130 for (; p < q; p++) {
131 evacuate((StgClosure**)p);
132 }
133 if (gct->failed_to_evac) {
134 any_failed = rtsTrue;
135 *mutArrPtrsCard(a,m) = 1;
136 gct->failed_to_evac = rtsFalse;
137 } else {
138 *mutArrPtrsCard(a,m) = 0;
139 }
140 }
141
142 gct->failed_to_evac = any_failed;
143 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
144 }
145
146 // scavenge only the marked areas of a MUT_ARR_PTRS
147 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
148 {
149 W_ m;
150 StgPtr p, q;
151 rtsBool any_failed;
152
153 any_failed = rtsFalse;
154 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
155 {
156 if (*mutArrPtrsCard(a,m) != 0) {
157 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
158 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
159 (StgPtr)&a->payload[a->ptrs]);
160 for (; p < q; p++) {
161 evacuate((StgClosure**)p);
162 }
163 if (gct->failed_to_evac) {
164 any_failed = rtsTrue;
165 gct->failed_to_evac = rtsFalse;
166 } else {
167 *mutArrPtrsCard(a,m) = 0;
168 }
169 }
170 }
171
172 gct->failed_to_evac = any_failed;
173 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
174 }
175
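// Walk 'size' words starting at p, evacuating each word whose bitmap
// bit is 0: in stack-frame and argument bitmaps a 0 bit marks a
// pointer and a 1 bit marks a non-pointer, the opposite convention to
// the SRT bitmaps handled by scavenge_srt() below.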
176 STATIC_INLINE StgPtr
177 scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
178 {
179 while (size > 0) {
180 if ((bitmap & 1) == 0) {
181 evacuate((StgClosure **)p);
182 }
183 p++;
184 bitmap = bitmap >> 1;
185 size--;
186 }
187 return p;
188 }
189
190 /* -----------------------------------------------------------------------------
191 Blocks of function args occur on the stack (at the top) and
192 in PAPs.
193 -------------------------------------------------------------------------- */
194
195 STATIC_INLINE StgPtr
196 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
197 {
198 StgPtr p;
199 StgWord bitmap;
200 StgWord size;
201
202 p = (StgPtr)args;
203 switch (fun_info->f.fun_type) {
204 case ARG_GEN:
205 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
206 size = BITMAP_SIZE(fun_info->f.b.bitmap);
207 goto small_bitmap;
208 case ARG_GEN_BIG:
209 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
210 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
211 p += size;
212 break;
213 default:
214 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
215 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
216 small_bitmap:
217 p = scavenge_small_bitmap(p, size, bitmap);
218 break;
219 }
220 return p;
221 }
222
223 STATIC_INLINE GNUC_ATTR_HOT StgPtr
224 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
225 {
226 StgPtr p;
227 StgWord bitmap;
228 StgFunInfoTable *fun_info;
229
230 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
231 ASSERT(fun_info->i.type != PAP);
232 p = (StgPtr)payload;
233
234 switch (fun_info->f.fun_type) {
235 case ARG_GEN:
236 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
237 goto small_bitmap;
238 case ARG_GEN_BIG:
239 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
240 p += size;
241 break;
242 case ARG_BCO:
243 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
244 p += size;
245 break;
246 default:
247 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
248 small_bitmap:
249 p = scavenge_small_bitmap(p, size, bitmap);
250 break;
251 }
252 return p;
253 }
254
255 STATIC_INLINE GNUC_ATTR_HOT StgPtr
256 scavenge_PAP (StgPAP *pap)
257 {
258 evacuate(&pap->fun);
259 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
260 }
261
262 STATIC_INLINE StgPtr
263 scavenge_AP (StgAP *ap)
264 {
265 evacuate(&ap->fun);
266 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
267 }
268
269 /* -----------------------------------------------------------------------------
270 Scavenge SRTs
271 -------------------------------------------------------------------------- */
272
273 /* Similar to scavenge_large_bitmap(), but we don't write back the
274 * pointers we get back from evacuate().
275 */
276 static void
277 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
278 {
279 nat i, j, size;
280 StgWord bitmap;
281 StgClosure **p;
282
283 size = (nat)large_srt->l.size;
284 p = (StgClosure **)large_srt->srt;
285
286 for (i = 0; i < size / BITS_IN(W_); i++) {
287 bitmap = large_srt->l.bitmap[i];
288 // skip zero words: bitmaps can be very sparse, and this helps
289 // performance a lot in some cases.
290 if (bitmap != 0) {
291 for (j = 0; j < BITS_IN(W_); j++) {
292 if ((bitmap & 1) != 0) {
293 evacuate(p);
294 }
295 p++;
296 bitmap = bitmap >> 1;
297 }
298 } else {
299 p += BITS_IN(W_);
300 }
301 }
302 if (size % BITS_IN(W_) != 0) {
303 bitmap = large_srt->l.bitmap[i];
304 for (j = 0; j < size % BITS_IN(W_); j++) {
305 if ((bitmap & 1) != 0) {
306 evacuate(p);
307 }
308 p++;
309 bitmap = bitmap >> 1;
310 }
311 }
312 }
313
314 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
315 * srt field in the info table. That's ok, because we'll
316 * never dereference it.
317 */
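// An srt_bitmap equal to (StgHalfWord)(-1) is a sentinel meaning the
// SRT is too large for an inline bitmap; in that case the srt pointer
// actually refers to an StgLargeSRT and is handed off to
// scavenge_large_srt_bitmap() above.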
318 STATIC_INLINE GNUC_ATTR_HOT void
319 scavenge_srt (StgClosure **srt, nat srt_bitmap)
320 {
321 nat bitmap;
322 StgClosure **p;
323
324 bitmap = srt_bitmap;
325 p = srt;
326
327 if (bitmap == (StgHalfWord)(-1)) {
328 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
329 return;
330 }
331
332 while (bitmap != 0) {
333 if ((bitmap & 1) != 0) {
334 #if defined(COMPILING_WINDOWS_DLL)
335 // Special-case to handle references to closures hiding out in DLLs, since
336 // double indirections are required to get at them. The code generator knows
337 // which is which when generating the SRT, so it stores the (indirect)
338 // reference to the DLL closure in the table by first adding one to it.
339 // We check for this here, and undo the addition before evacuating it.
340 //
341 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
342 // closure that's fixed at link-time, and no extra magic is required.
343 if ( (W_)(*srt) & 0x1 ) {
344 evacuate( (StgClosure**) ((W_) (*srt) & ~0x1));
345 } else {
346 evacuate(p);
347 }
348 #else
349 evacuate(p);
350 #endif
351 }
352 p++;
353 bitmap = bitmap >> 1;
354 }
355 }
356
357
358 STATIC_INLINE GNUC_ATTR_HOT void
359 scavenge_thunk_srt(const StgInfoTable *info)
360 {
361 StgThunkInfoTable *thunk_info;
362
363 if (!major_gc) return;
364
365 thunk_info = itbl_to_thunk_itbl(info);
366 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
367 }
368
369 STATIC_INLINE GNUC_ATTR_HOT void
370 scavenge_fun_srt(const StgInfoTable *info)
371 {
372 StgFunInfoTable *fun_info;
373
374 if (!major_gc) return;
375
376 fun_info = itbl_to_fun_itbl(info);
377 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
378 }
379
380 /* -----------------------------------------------------------------------------
381 Scavenge a block from the given scan pointer up to bd->free.
382
383 evac_gen_no is set by the caller to be either zero (for a step in a
384 generation < N) or G where G is the generation of the step being
385 scavenged.
386
387 We sometimes temporarily change evac_gen_no back to zero if we're
388 scavenging a mutable object where eager promotion isn't such a good
389 idea.
390 -------------------------------------------------------------------------- */
391
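// The scan pointer has to chase bd->free: objects evacuated while
// scavenging may be copied into this very block, so the loop re-reads
// bd->free (and ws->todo_free for the current todo block) on every
// iteration. Once the block is exhausted it is pushed onto the scanned
// list, unless it is still the workspace's todo block.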
392 static GNUC_ATTR_HOT void
393 scavenge_block (bdescr *bd)
394 {
395 StgPtr p, q;
396 StgInfoTable *info;
397 rtsBool saved_eager_promotion;
398 gen_workspace *ws;
399
400 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
401 bd->start, bd->gen_no, bd->u.scan);
402
403 gct->scan_bd = bd;
404 gct->evac_gen_no = bd->gen_no;
405 saved_eager_promotion = gct->eager_promotion;
406 gct->failed_to_evac = rtsFalse;
407
408 ws = &gct->gens[bd->gen->no];
409
410 p = bd->u.scan;
411
412 // we might be evacuating into the very object that we're
413 // scavenging, so we have to check the real bd->free pointer each
414 // time around the loop.
415 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
416
417 ASSERT(bd->link == NULL);
418 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
419 info = get_itbl((StgClosure *)p);
420
421 ASSERT(gct->thunk_selector_depth == 0);
422
423 q = p;
424 switch (info->type) {
425
426 case MVAR_CLEAN:
427 case MVAR_DIRTY:
428 {
429 StgMVar *mvar = ((StgMVar *)p);
430 gct->eager_promotion = rtsFalse;
431 evacuate((StgClosure **)&mvar->head);
432 evacuate((StgClosure **)&mvar->tail);
433 evacuate((StgClosure **)&mvar->value);
434 gct->eager_promotion = saved_eager_promotion;
435
436 if (gct->failed_to_evac) {
437 mvar->header.info = &stg_MVAR_DIRTY_info;
438 } else {
439 mvar->header.info = &stg_MVAR_CLEAN_info;
440 }
441 p += sizeofW(StgMVar);
442 break;
443 }
444
445 case TVAR:
446 {
447 StgTVar *tvar = ((StgTVar *)p);
448 gct->eager_promotion = rtsFalse;
449 evacuate((StgClosure **)&tvar->current_value);
450 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
451 gct->eager_promotion = saved_eager_promotion;
452
453 if (gct->failed_to_evac) {
454 tvar->header.info = &stg_TVAR_DIRTY_info;
455 } else {
456 tvar->header.info = &stg_TVAR_CLEAN_info;
457 }
458 p += sizeofW(StgTVar);
459 break;
460 }
461
462 case FUN_2_0:
463 scavenge_fun_srt(info);
464 evacuate(&((StgClosure *)p)->payload[1]);
465 evacuate(&((StgClosure *)p)->payload[0]);
466 p += sizeofW(StgHeader) + 2;
467 break;
468
469 case THUNK_2_0:
470 scavenge_thunk_srt(info);
471 evacuate(&((StgThunk *)p)->payload[1]);
472 evacuate(&((StgThunk *)p)->payload[0]);
473 p += sizeofW(StgThunk) + 2;
474 break;
475
476 case CONSTR_2_0:
477 evacuate(&((StgClosure *)p)->payload[1]);
478 evacuate(&((StgClosure *)p)->payload[0]);
479 p += sizeofW(StgHeader) + 2;
480 break;
481
482 case THUNK_1_0:
483 scavenge_thunk_srt(info);
484 evacuate(&((StgThunk *)p)->payload[0]);
485 p += sizeofW(StgThunk) + 1;
486 break;
487
488 case FUN_1_0:
489 scavenge_fun_srt(info); // fall through
490 case CONSTR_1_0:
491 evacuate(&((StgClosure *)p)->payload[0]);
492 p += sizeofW(StgHeader) + 1;
493 break;
494
495 case THUNK_0_1:
496 scavenge_thunk_srt(info);
497 p += sizeofW(StgThunk) + 1;
498 break;
499
500 case FUN_0_1:
501 scavenge_fun_srt(info); // fall through
502 case CONSTR_0_1:
503 p += sizeofW(StgHeader) + 1;
504 break;
505
506 case THUNK_0_2:
507 scavenge_thunk_srt(info);
508 p += sizeofW(StgThunk) + 2;
509 break;
510
511 case FUN_0_2:
512 scavenge_fun_srt(info); // fall through
513 case CONSTR_0_2:
514 p += sizeofW(StgHeader) + 2;
515 break;
516
517 case THUNK_1_1:
518 scavenge_thunk_srt(info);
519 evacuate(&((StgThunk *)p)->payload[0]);
520 p += sizeofW(StgThunk) + 2;
521 break;
522
523 case FUN_1_1:
524 scavenge_fun_srt(info); // fall through
525 case CONSTR_1_1:
526 evacuate(&((StgClosure *)p)->payload[0]);
527 p += sizeofW(StgHeader) + 2;
528 break;
529
530 case FUN:
531 scavenge_fun_srt(info);
532 goto gen_obj;
533
534 case THUNK:
535 {
536 StgPtr end;
537
538 scavenge_thunk_srt(info);
539 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
540 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
541 evacuate((StgClosure **)p);
542 }
543 p += info->layout.payload.nptrs;
544 break;
545 }
546
547 gen_obj:
548 case CONSTR:
549 case WEAK:
550 case PRIM:
551 {
552 StgPtr end;
553
554 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
555 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
556 evacuate((StgClosure **)p);
557 }
558 p += info->layout.payload.nptrs;
559 break;
560 }
561
562 case BCO: {
563 StgBCO *bco = (StgBCO *)p;
564 evacuate((StgClosure **)&bco->instrs);
565 evacuate((StgClosure **)&bco->literals);
566 evacuate((StgClosure **)&bco->ptrs);
567 p += bco_sizeW(bco);
568 break;
569 }
570
571 case IND_PERM:
572 case BLACKHOLE:
573 evacuate(&((StgInd *)p)->indirectee);
574 p += sizeofW(StgInd);
575 break;
576
577 case MUT_VAR_CLEAN:
578 case MUT_VAR_DIRTY:
579 gct->eager_promotion = rtsFalse;
580 evacuate(&((StgMutVar *)p)->var);
581 gct->eager_promotion = saved_eager_promotion;
582
583 if (gct->failed_to_evac) {
584 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
585 } else {
586 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
587 }
588 p += sizeofW(StgMutVar);
589 break;
590
591 case BLOCKING_QUEUE:
592 {
593 StgBlockingQueue *bq = (StgBlockingQueue *)p;
594
595 gct->eager_promotion = rtsFalse;
596 evacuate(&bq->bh);
597 evacuate((StgClosure**)&bq->owner);
598 evacuate((StgClosure**)&bq->queue);
599 evacuate((StgClosure**)&bq->link);
600 gct->eager_promotion = saved_eager_promotion;
601
602 if (gct->failed_to_evac) {
603 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
604 } else {
605 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
606 }
607 p += sizeofW(StgBlockingQueue);
608 break;
609 }
610
611 case THUNK_SELECTOR:
612 {
613 StgSelector *s = (StgSelector *)p;
614 evacuate(&s->selectee);
615 p += THUNK_SELECTOR_sizeW();
616 break;
617 }
618
619 // A chunk of stack saved in a heap object
620 case AP_STACK:
621 {
622 StgAP_STACK *ap = (StgAP_STACK *)p;
623
624 evacuate(&ap->fun);
625 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
626 p = (StgPtr)ap->payload + ap->size;
627 break;
628 }
629
630 case PAP:
631 p = scavenge_PAP((StgPAP *)p);
632 break;
633
634 case AP:
635 p = scavenge_AP((StgAP *)p);
636 break;
637
638 case ARR_WORDS:
639 // nothing to follow
640 p += arr_words_sizeW((StgArrWords *)p);
641 break;
642
643 case MUT_ARR_PTRS_CLEAN:
644 case MUT_ARR_PTRS_DIRTY:
645 {
646 // We don't eagerly promote objects pointed to by a mutable
647 // array, but if we find the array only points to objects in
648 // the same or an older generation, we mark it "clean" and
649 // avoid traversing it during minor GCs.
650 gct->eager_promotion = rtsFalse;
651
652 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
653
654 if (gct->failed_to_evac) {
655 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
656 } else {
657 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
658 }
659
660 gct->eager_promotion = saved_eager_promotion;
661 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
662 break;
663 }
664
665 case MUT_ARR_PTRS_FROZEN:
666 case MUT_ARR_PTRS_FROZEN0:
667 // follow everything
668 {
669 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
670
671 // If we're going to put this object on the mutable list, then
672 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
673 if (gct->failed_to_evac) {
674 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
675 } else {
676 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
677 }
678 break;
679 }
680
681 case SMALL_MUT_ARR_PTRS_CLEAN:
682 case SMALL_MUT_ARR_PTRS_DIRTY:
683 // follow everything
684 {
685 StgPtr next;
686
687 // We don't eagerly promote objects pointed to by a mutable
688 // array, but if we find the array only points to objects in
689 // the same or an older generation, we mark it "clean" and
690 // avoid traversing it during minor GCs.
691 gct->eager_promotion = rtsFalse;
692 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
693 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
694 evacuate((StgClosure **)p);
695 }
696 gct->eager_promotion = saved_eager_promotion;
697
698 if (gct->failed_to_evac) {
699 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
700 } else {
701 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
702 }
703
704 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
705 break;
706 }
707
708 case SMALL_MUT_ARR_PTRS_FROZEN:
709 case SMALL_MUT_ARR_PTRS_FROZEN0:
710 // follow everything
711 {
712 StgPtr next;
713
714 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
715 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
716 evacuate((StgClosure **)p);
717 }
718
719 // If we're going to put this object on the mutable list, then
720 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
721 if (gct->failed_to_evac) {
722 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
723 } else {
724 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
725 }
726 break;
727 }
728
729 case TSO:
730 {
731 scavengeTSO((StgTSO *)p);
732 p += sizeofW(StgTSO);
733 break;
734 }
735
736 case STACK:
737 {
738 StgStack *stack = (StgStack*)p;
739
740 gct->eager_promotion = rtsFalse;
741
742 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
743 stack->dirty = gct->failed_to_evac;
744 p += stack_sizeW(stack);
745
746 gct->eager_promotion = saved_eager_promotion;
747 break;
748 }
749
750 case MUT_PRIM:
751 {
752 StgPtr end;
753
754 gct->eager_promotion = rtsFalse;
755
756 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
757 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
758 evacuate((StgClosure **)p);
759 }
760 p += info->layout.payload.nptrs;
761
762 gct->eager_promotion = saved_eager_promotion;
763 gct->failed_to_evac = rtsTrue; // mutable
764 break;
765 }
766
767 case TREC_CHUNK:
768 {
769 StgWord i;
770 StgTRecChunk *tc = ((StgTRecChunk *) p);
771 TRecEntry *e = &(tc -> entries[0]);
772 gct->eager_promotion = rtsFalse;
773 evacuate((StgClosure **)&tc->prev_chunk);
774 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
775 evacuate((StgClosure **)&e->tvar);
776 evacuate((StgClosure **)&e->expected_value);
777 evacuate((StgClosure **)&e->new_value);
778 }
779 gct->eager_promotion = saved_eager_promotion;
780 gct->failed_to_evac = rtsTrue; // mutable
781 p += sizeofW(StgTRecChunk);
782 break;
783 }
784
785 default:
786 barf("scavenge: unimplemented/strange closure type %d @ %p",
787 info->type, p);
788 }
789
790 /*
791 * We need to record the current object on the mutable list if
792 * (a) It is actually mutable, or
793 * (b) It contains pointers to a younger generation.
794 * Case (b) arises if we didn't manage to promote everything that
795 * the current object points to into the current generation.
796 */
797 if (gct->failed_to_evac) {
798 gct->failed_to_evac = rtsFalse;
799 if (bd->gen_no > 0) {
800 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
801 }
802 }
803 }
804
805 if (p > bd->free) {
806 gct->copied += ws->todo_free - bd->free;
807 bd->free = p;
808 }
809
810 debugTrace(DEBUG_gc, " scavenged %lu bytes",
811 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
812
813 // update stats: this is a block that has been scavenged
814 gct->scanned += bd->free - bd->u.scan;
815 bd->u.scan = bd->free;
816
817 if (bd != ws->todo_bd) {
818 // we're not going to evac any more objects into
819 // this block, so push it now.
820 push_scanned_block(bd, ws);
821 }
822
823 gct->scan_bd = NULL;
824 }
825 /* -----------------------------------------------------------------------------
826 Scavenge everything on the mark stack.
827
828 This is slightly different from scavenge():
829 - we don't walk linearly through the objects, so the scavenger
830 doesn't need to advance the pointer on to the next object.
831 -------------------------------------------------------------------------- */
832
833 static void
834 scavenge_mark_stack(void)
835 {
836 StgPtr p, q;
837 StgInfoTable *info;
838 rtsBool saved_eager_promotion;
839
840 gct->evac_gen_no = oldest_gen->no;
841 saved_eager_promotion = gct->eager_promotion;
842
843 while ((p = pop_mark_stack())) {
844
845 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
846 info = get_itbl((StgClosure *)p);
847
848 q = p;
849 switch (info->type) {
850
851 case MVAR_CLEAN:
852 case MVAR_DIRTY:
853 {
854 StgMVar *mvar = ((StgMVar *)p);
855 gct->eager_promotion = rtsFalse;
856 evacuate((StgClosure **)&mvar->head);
857 evacuate((StgClosure **)&mvar->tail);
858 evacuate((StgClosure **)&mvar->value);
859 gct->eager_promotion = saved_eager_promotion;
860
861 if (gct->failed_to_evac) {
862 mvar->header.info = &stg_MVAR_DIRTY_info;
863 } else {
864 mvar->header.info = &stg_MVAR_CLEAN_info;
865 }
866 break;
867 }
868
869 case TVAR:
870 {
871 StgTVar *tvar = ((StgTVar *)p);
872 gct->eager_promotion = rtsFalse;
873 evacuate((StgClosure **)&tvar->current_value);
874 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
875 gct->eager_promotion = saved_eager_promotion;
876
877 if (gct->failed_to_evac) {
878 tvar->header.info = &stg_TVAR_DIRTY_info;
879 } else {
880 tvar->header.info = &stg_TVAR_CLEAN_info;
881 }
882 break;
883 }
884
885 case FUN_2_0:
886 scavenge_fun_srt(info);
887 evacuate(&((StgClosure *)p)->payload[1]);
888 evacuate(&((StgClosure *)p)->payload[0]);
889 break;
890
891 case THUNK_2_0:
892 scavenge_thunk_srt(info);
893 evacuate(&((StgThunk *)p)->payload[1]);
894 evacuate(&((StgThunk *)p)->payload[0]);
895 break;
896
897 case CONSTR_2_0:
898 evacuate(&((StgClosure *)p)->payload[1]);
899 evacuate(&((StgClosure *)p)->payload[0]);
900 break;
901
902 case FUN_1_0:
903 case FUN_1_1:
904 scavenge_fun_srt(info);
905 evacuate(&((StgClosure *)p)->payload[0]);
906 break;
907
908 case THUNK_1_0:
909 case THUNK_1_1:
910 scavenge_thunk_srt(info);
911 evacuate(&((StgThunk *)p)->payload[0]);
912 break;
913
914 case CONSTR_1_0:
915 case CONSTR_1_1:
916 evacuate(&((StgClosure *)p)->payload[0]);
917 break;
918
919 case FUN_0_1:
920 case FUN_0_2:
921 scavenge_fun_srt(info);
922 break;
923
924 case THUNK_0_1:
925 case THUNK_0_2:
926 scavenge_thunk_srt(info);
927 break;
928
929 case CONSTR_0_1:
930 case CONSTR_0_2:
931 break;
932
933 case FUN:
934 scavenge_fun_srt(info);
935 goto gen_obj;
936
937 case THUNK:
938 {
939 StgPtr end;
940
941 scavenge_thunk_srt(info);
942 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
943 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
944 evacuate((StgClosure **)p);
945 }
946 break;
947 }
948
949 gen_obj:
950 case CONSTR:
951 case WEAK:
952 case PRIM:
953 {
954 StgPtr end;
955
956 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
957 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
958 evacuate((StgClosure **)p);
959 }
960 break;
961 }
962
963 case BCO: {
964 StgBCO *bco = (StgBCO *)p;
965 evacuate((StgClosure **)&bco->instrs);
966 evacuate((StgClosure **)&bco->literals);
967 evacuate((StgClosure **)&bco->ptrs);
968 break;
969 }
970
971 case IND_PERM:
972 // don't need to do anything here: the only possible case
973 // is that we're in a 1-space compacting collector, with
974 // no "old" generation.
975 break;
976
977 case IND:
978 case BLACKHOLE:
979 evacuate(&((StgInd *)p)->indirectee);
980 break;
981
982 case MUT_VAR_CLEAN:
983 case MUT_VAR_DIRTY: {
984 gct->eager_promotion = rtsFalse;
985 evacuate(&((StgMutVar *)p)->var);
986 gct->eager_promotion = saved_eager_promotion;
987
988 if (gct->failed_to_evac) {
989 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
990 } else {
991 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
992 }
993 break;
994 }
995
996 case BLOCKING_QUEUE:
997 {
998 StgBlockingQueue *bq = (StgBlockingQueue *)p;
999
1000 gct->eager_promotion = rtsFalse;
1001 evacuate(&bq->bh);
1002 evacuate((StgClosure**)&bq->owner);
1003 evacuate((StgClosure**)&bq->queue);
1004 evacuate((StgClosure**)&bq->link);
1005 gct->eager_promotion = saved_eager_promotion;
1006
1007 if (gct->failed_to_evac) {
1008 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1009 } else {
1010 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1011 }
1012 break;
1013 }
1014
1015 case ARR_WORDS:
1016 break;
1017
1018 case THUNK_SELECTOR:
1019 {
1020 StgSelector *s = (StgSelector *)p;
1021 evacuate(&s->selectee);
1022 break;
1023 }
1024
1025 // A chunk of stack saved in a heap object
1026 case AP_STACK:
1027 {
1028 StgAP_STACK *ap = (StgAP_STACK *)p;
1029
1030 evacuate(&ap->fun);
1031 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1032 break;
1033 }
1034
1035 case PAP:
1036 scavenge_PAP((StgPAP *)p);
1037 break;
1038
1039 case AP:
1040 scavenge_AP((StgAP *)p);
1041 break;
1042
1043 case MUT_ARR_PTRS_CLEAN:
1044 case MUT_ARR_PTRS_DIRTY:
1045 // follow everything
1046 {
1047 // We don't eagerly promote objects pointed to by a mutable
1048 // array, but if we find the array only points to objects in
1049 // the same or an older generation, we mark it "clean" and
1050 // avoid traversing it during minor GCs.
1051 gct->eager_promotion = rtsFalse;
1052
1053 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1054
1055 if (gct->failed_to_evac) {
1056 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1057 } else {
1058 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1059 }
1060
1061 gct->eager_promotion = saved_eager_promotion;
1062 gct->failed_to_evac = rtsTrue; // mutable anyhow.
1063 break;
1064 }
1065
1066 case MUT_ARR_PTRS_FROZEN:
1067 case MUT_ARR_PTRS_FROZEN0:
1068 // follow everything
1069 {
1070 StgPtr q = p;
1071
1072 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1073
1074 // If we're going to put this object on the mutable list, then
1075 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1076 if (gct->failed_to_evac) {
1077 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1078 } else {
1079 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1080 }
1081 break;
1082 }
1083
1084 case SMALL_MUT_ARR_PTRS_CLEAN:
1085 case SMALL_MUT_ARR_PTRS_DIRTY:
1086 // follow everything
1087 {
1088 StgPtr next;
1089 rtsBool saved_eager;
1090
1091 // We don't eagerly promote objects pointed to by a mutable
1092 // array, but if we find the array only points to objects in
1093 // the same or an older generation, we mark it "clean" and
1094 // avoid traversing it during minor GCs.
1095 saved_eager = gct->eager_promotion;
1096 gct->eager_promotion = rtsFalse;
1097 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1098 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1099 evacuate((StgClosure **)p);
1100 }
1101 gct->eager_promotion = saved_eager;
1102
1103 if (gct->failed_to_evac) {
1104 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1105 } else {
1106 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1107 }
1108
1109 gct->failed_to_evac = rtsTrue; // mutable anyhow.
1110 break;
1111 }
1112
1113 case SMALL_MUT_ARR_PTRS_FROZEN:
1114 case SMALL_MUT_ARR_PTRS_FROZEN0:
1115 // follow everything
1116 {
1117 StgPtr next, q = p;
1118
1119 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1120 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1121 evacuate((StgClosure **)p);
1122 }
1123
1124 // If we're going to put this object on the mutable list, then
1125 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1126 if (gct->failed_to_evac) {
1127 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1128 } else {
1129 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1130 }
1131 break;
1132 }
1133
1134 case TSO:
1135 {
1136 scavengeTSO((StgTSO*)p);
1137 break;
1138 }
1139
1140 case STACK:
1141 {
1142 StgStack *stack = (StgStack*)p;
1143
1144 gct->eager_promotion = rtsFalse;
1145
1146 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1147 stack->dirty = gct->failed_to_evac;
1148
1149 gct->eager_promotion = saved_eager_promotion;
1150 break;
1151 }
1152
1153 case MUT_PRIM:
1154 {
1155 StgPtr end;
1156
1157 gct->eager_promotion = rtsFalse;
1158
1159 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1160 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1161 evacuate((StgClosure **)p);
1162 }
1163
1164 gct->eager_promotion = saved_eager_promotion;
1165 gct->failed_to_evac = rtsTrue; // mutable
1166 break;
1167 }
1168
1169 case TREC_CHUNK:
1170 {
1171 StgWord i;
1172 StgTRecChunk *tc = ((StgTRecChunk *) p);
1173 TRecEntry *e = &(tc -> entries[0]);
1174 gct->eager_promotion = rtsFalse;
1175 evacuate((StgClosure **)&tc->prev_chunk);
1176 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1177 evacuate((StgClosure **)&e->tvar);
1178 evacuate((StgClosure **)&e->expected_value);
1179 evacuate((StgClosure **)&e->new_value);
1180 }
1181 gct->eager_promotion = saved_eager_promotion;
1182 gct->failed_to_evac = rtsTrue; // mutable
1183 break;
1184 }
1185
1186 default:
1187 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1188 info->type, p);
1189 }
1190
1191 if (gct->failed_to_evac) {
1192 gct->failed_to_evac = rtsFalse;
1193 if (gct->evac_gen_no) {
1194 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1195 }
1196 }
1197 } // while (p = pop_mark_stack())
1198 }
1199
1200 /* -----------------------------------------------------------------------------
1201 Scavenge one object.
1202
1203 This is used for objects that are temporarily marked as mutable
1204 because they contain old-to-new generation pointers. Only certain
1205 objects can have this property.
1206 -------------------------------------------------------------------------- */
1207
1208 static rtsBool
1209 scavenge_one(StgPtr p)
1210 {
1211 const StgInfoTable *info;
1212 rtsBool no_luck;
1213 rtsBool saved_eager_promotion;
1214
1215 saved_eager_promotion = gct->eager_promotion;
1216
1217 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1218 info = get_itbl((StgClosure *)p);
1219
1220 switch (info->type) {
1221
1222 case MVAR_CLEAN:
1223 case MVAR_DIRTY:
1224 {
1225 StgMVar *mvar = ((StgMVar *)p);
1226 gct->eager_promotion = rtsFalse;
1227 evacuate((StgClosure **)&mvar->head);
1228 evacuate((StgClosure **)&mvar->tail);
1229 evacuate((StgClosure **)&mvar->value);
1230 gct->eager_promotion = saved_eager_promotion;
1231
1232 if (gct->failed_to_evac) {
1233 mvar->header.info = &stg_MVAR_DIRTY_info;
1234 } else {
1235 mvar->header.info = &stg_MVAR_CLEAN_info;
1236 }
1237 break;
1238 }
1239
1240 case TVAR:
1241 {
1242 StgTVar *tvar = ((StgTVar *)p);
1243 gct->eager_promotion = rtsFalse;
1244 evacuate((StgClosure **)&tvar->current_value);
1245 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1246 gct->eager_promotion = saved_eager_promotion;
1247
1248 if (gct->failed_to_evac) {
1249 tvar->header.info = &stg_TVAR_DIRTY_info;
1250 } else {
1251 tvar->header.info = &stg_TVAR_CLEAN_info;
1252 }
1253 break;
1254 }
1255
1256 case THUNK:
1257 case THUNK_1_0:
1258 case THUNK_0_1:
1259 case THUNK_1_1:
1260 case THUNK_0_2:
1261 case THUNK_2_0:
1262 {
1263 StgPtr q, end;
1264
1265 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1266 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1267 evacuate((StgClosure **)q);
1268 }
1269 break;
1270 }
1271
1272 case FUN:
1273 case FUN_1_0: // hardly worth specialising these guys
1274 case FUN_0_1:
1275 case FUN_1_1:
1276 case FUN_0_2:
1277 case FUN_2_0:
1278 case CONSTR:
1279 case CONSTR_1_0:
1280 case CONSTR_0_1:
1281 case CONSTR_1_1:
1282 case CONSTR_0_2:
1283 case CONSTR_2_0:
1284 case WEAK:
1285 case PRIM:
1286 case IND_PERM:
1287 {
1288 StgPtr q, end;
1289
1290 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1291 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1292 evacuate((StgClosure **)q);
1293 }
1294 break;
1295 }
1296
1297 case MUT_VAR_CLEAN:
1298 case MUT_VAR_DIRTY: {
1299 StgPtr q = p;
1300
1301 gct->eager_promotion = rtsFalse;
1302 evacuate(&((StgMutVar *)p)->var);
1303 gct->eager_promotion = saved_eager_promotion;
1304
1305 if (gct->failed_to_evac) {
1306 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1307 } else {
1308 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1309 }
1310 break;
1311 }
1312
1313 case BLOCKING_QUEUE:
1314 {
1315 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1316
1317 gct->eager_promotion = rtsFalse;
1318 evacuate(&bq->bh);
1319 evacuate((StgClosure**)&bq->owner);
1320 evacuate((StgClosure**)&bq->queue);
1321 evacuate((StgClosure**)&bq->link);
1322 gct->eager_promotion = saved_eager_promotion;
1323
1324 if (gct->failed_to_evac) {
1325 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1326 } else {
1327 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1328 }
1329 break;
1330 }
1331
1332 case THUNK_SELECTOR:
1333 {
1334 StgSelector *s = (StgSelector *)p;
1335 evacuate(&s->selectee);
1336 break;
1337 }
1338
1339 case AP_STACK:
1340 {
1341 StgAP_STACK *ap = (StgAP_STACK *)p;
1342
1343 evacuate(&ap->fun);
1344 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1345 p = (StgPtr)ap->payload + ap->size;
1346 break;
1347 }
1348
1349 case PAP:
1350 p = scavenge_PAP((StgPAP *)p);
1351 break;
1352
1353 case AP:
1354 p = scavenge_AP((StgAP *)p);
1355 break;
1356
1357 case ARR_WORDS:
1358 // nothing to follow
1359 break;
1360
1361 case MUT_ARR_PTRS_CLEAN:
1362 case MUT_ARR_PTRS_DIRTY:
1363 {
1364 // We don't eagerly promote objects pointed to by a mutable
1365 // array, but if we find the array only points to objects in
1366 // the same or an older generation, we mark it "clean" and
1367 // avoid traversing it during minor GCs.
1368 gct->eager_promotion = rtsFalse;
1369
1370 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1371
1372 if (gct->failed_to_evac) {
1373 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1374 } else {
1375 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1376 }
1377
1378 gct->eager_promotion = saved_eager_promotion;
1379 gct->failed_to_evac = rtsTrue;
1380 break;
1381 }
1382
1383 case MUT_ARR_PTRS_FROZEN:
1384 case MUT_ARR_PTRS_FROZEN0:
1385 {
1386 // follow everything
1387 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1388
1389 // If we're going to put this object on the mutable list, then
1390 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1391 if (gct->failed_to_evac) {
1392 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1393 } else {
1394 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1395 }
1396 break;
1397 }
1398
1399 case SMALL_MUT_ARR_PTRS_CLEAN:
1400 case SMALL_MUT_ARR_PTRS_DIRTY:
1401 {
1402 StgPtr next, q;
1403 rtsBool saved_eager;
1404
1405 // We don't eagerly promote objects pointed to by a mutable
1406 // array, but if we find the array only points to objects in
1407 // the same or an older generation, we mark it "clean" and
1408 // avoid traversing it during minor GCs.
1409 saved_eager = gct->eager_promotion;
1410 gct->eager_promotion = rtsFalse;
1411 q = p;
1412 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1413 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1414 evacuate((StgClosure **)p);
1415 }
1416 gct->eager_promotion = saved_eager;
1417
1418 if (gct->failed_to_evac) {
1419 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1420 } else {
1421 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1422 }
1423
1424 gct->failed_to_evac = rtsTrue;
1425 break;
1426 }
1427
1428 case SMALL_MUT_ARR_PTRS_FROZEN:
1429 case SMALL_MUT_ARR_PTRS_FROZEN0:
1430 {
1431 // follow everything
1432 StgPtr next, q=p;
1433
1434 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1435 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1436 evacuate((StgClosure **)p);
1437 }
1438
1439 // If we're going to put this object on the mutable list, then
1440 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1441 if (gct->failed_to_evac) {
1442 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1443 } else {
1444 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1445 }
1446 break;
1447 }
1448
1449 case TSO:
1450 {
1451 scavengeTSO((StgTSO*)p);
1452 break;
1453 }
1454
1455 case STACK:
1456 {
1457 StgStack *stack = (StgStack*)p;
1458
1459 gct->eager_promotion = rtsFalse;
1460
1461 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1462 stack->dirty = gct->failed_to_evac;
1463
1464 gct->eager_promotion = saved_eager_promotion;
1465 break;
1466 }
1467
1468 case MUT_PRIM:
1469 {
1470 StgPtr end;
1471
1472 gct->eager_promotion = rtsFalse;
1473
1474 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1475 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1476 evacuate((StgClosure **)p);
1477 }
1478
1479 gct->eager_promotion = saved_eager_promotion;
1480 gct->failed_to_evac = rtsTrue; // mutable
1481 break;
1482
1483 }
1484
1485 case TREC_CHUNK:
1486 {
1487 StgWord i;
1488 StgTRecChunk *tc = ((StgTRecChunk *) p);
1489 TRecEntry *e = &(tc -> entries[0]);
1490 gct->eager_promotion = rtsFalse;
1491 evacuate((StgClosure **)&tc->prev_chunk);
1492 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1493 evacuate((StgClosure **)&e->tvar);
1494 evacuate((StgClosure **)&e->expected_value);
1495 evacuate((StgClosure **)&e->new_value);
1496 }
1497 gct->eager_promotion = saved_eager_promotion;
1498 gct->failed_to_evac = rtsTrue; // mutable
1499 break;
1500 }
1501
1502 case IND:
1503 // IND can happen, for example, when the interpreter allocates
1504 // a gigantic AP closure (more than one block), which ends up
1505 // on the large-object list and then gets updated. See #3424.
1506 case BLACKHOLE:
1507 case IND_STATIC:
1508 evacuate(&((StgInd *)p)->indirectee);
1509
1510 #if 0 && defined(DEBUG)
1511 if (RtsFlags.DebugFlags.gc)
1512 /* Debugging code to print out the size of the thing we just
1513 * promoted
1514 */
1515 {
1516 StgPtr start = gen->scan;
1517 bdescr *start_bd = gen->scan_bd;
1518 StgWord size = 0;
1519 scavenge(&gen);
1520 if (start_bd != gen->scan_bd) {
1521 size += (P_)BLOCK_ROUND_UP(start) - start;
1522 start_bd = start_bd->link;
1523 while (start_bd != gen->scan_bd) {
1524 size += BLOCK_SIZE_W;
1525 start_bd = start_bd->link;
1526 }
1527 size += gen->scan -
1528 (P_)BLOCK_ROUND_DOWN(gen->scan);
1529 } else {
1530 size = gen->scan - start;
1531 }
1532 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1533 }
1534 #endif
1535 break;
1536
1537 default:
1538 barf("scavenge_one: strange object %d", (int)(info->type));
1539 }
1540
1541 no_luck = gct->failed_to_evac;
1542 gct->failed_to_evac = rtsFalse;
1543 return (no_luck);
1544 }
1545
1546 /* -----------------------------------------------------------------------------
1547 Scavenging mutable lists.
1548
1549 We treat the mutable list of each generation > N (i.e. all the
1550 generations older than the one being collected) as roots. We also
1551 remove non-mutable objects from the mutable list at this point.
1552 -------------------------------------------------------------------------- */
1553
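// MUT_ARR_PTRS_CLEAN and MUT_ARR_PTRS_DIRTY get special treatment
// below: they are put straight back on the mutable list, and DIRTY
// arrays are re-scavenged via their card table
// (scavenge_mut_arr_ptrs_marked) instead of going through
// scavenge_one().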
1554 void
1555 scavenge_mutable_list(bdescr *bd, generation *gen)
1556 {
1557 StgPtr p, q;
1558 nat gen_no;
1559
1560 gen_no = gen->no;
1561 gct->evac_gen_no = gen_no;
1562 for (; bd != NULL; bd = bd->link) {
1563 for (q = bd->start; q < bd->free; q++) {
1564 p = (StgPtr)*q;
1565 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1566
1567 #ifdef DEBUG
1568 switch (get_itbl((StgClosure *)p)->type) {
1569 case MUT_VAR_CLEAN:
1570 // can happen due to concurrent writeMutVars
1571 case MUT_VAR_DIRTY:
1572 mutlist_MUTVARS++; break;
1573 case MUT_ARR_PTRS_CLEAN:
1574 case MUT_ARR_PTRS_DIRTY:
1575 case MUT_ARR_PTRS_FROZEN:
1576 case MUT_ARR_PTRS_FROZEN0:
1577 mutlist_MUTARRS++; break;
1578 case MVAR_CLEAN:
1579 barf("MVAR_CLEAN on mutable list");
1580 case MVAR_DIRTY:
1581 mutlist_MVARS++; break;
1582 case TVAR:
1583 mutlist_TVAR++; break;
1584 case TREC_CHUNK:
1585 mutlist_TREC_CHUNK++; break;
1586 case MUT_PRIM:
1587 if (((StgClosure*)p)->header.info == &stg_TVAR_WATCH_QUEUE_info)
1588 mutlist_TVAR_WATCH_QUEUE++;
1589 else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
1590 mutlist_TREC_HEADER++;
1591 else if (((StgClosure*)p)->header.info == &stg_ATOMIC_INVARIANT_info)
1592 mutlist_ATOMIC_INVARIANT++;
1593 else if (((StgClosure*)p)->header.info == &stg_INVARIANT_CHECK_QUEUE_info)
1594 mutlist_INVARIANT_CHECK_QUEUE++;
1595 else
1596 mutlist_OTHERS++;
1597 break;
1598 default:
1599 mutlist_OTHERS++; break;
1600 }
1601 #endif
1602
1603 // Check whether this object is "clean", that is it
1604 // definitely doesn't point into a young generation.
1605 // Clean objects don't need to be scavenged. Some clean
1606 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1607 // list at all; others, such as MUT_ARR_PTRS
1608 // are always on the mutable list.
1609 //
1610 switch (get_itbl((StgClosure *)p)->type) {
1611 case MUT_ARR_PTRS_CLEAN:
1612 recordMutableGen_GC((StgClosure *)p,gen_no);
1613 continue;
1614 case MUT_ARR_PTRS_DIRTY:
1615 {
1616 rtsBool saved_eager_promotion;
1617 saved_eager_promotion = gct->eager_promotion;
1618 gct->eager_promotion = rtsFalse;
1619
1620 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1621
1622 if (gct->failed_to_evac) {
1623 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1624 } else {
1625 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1626 }
1627
1628 gct->eager_promotion = saved_eager_promotion;
1629 gct->failed_to_evac = rtsFalse;
1630 recordMutableGen_GC((StgClosure *)p,gen_no);
1631 continue;
1632 }
1633 default:
1634 ;
1635 }
1636
1637 if (scavenge_one(p)) {
1638 // didn't manage to promote everything, so put the
1639 // object back on the list.
1640 recordMutableGen_GC((StgClosure *)p,gen_no);
1641 }
1642 }
1643 }
1644 }
1645
1646 void
1647 scavenge_capability_mut_lists (Capability *cap)
1648 {
1649 nat g;
1650
1651 /* Mutable lists from each generation > N:
1652 * we want to *scavenge* these roots, not evacuate them: they're not
1653 * going to move in this GC.
1654 * Also do them in reverse generation order, for the usual reason:
1655 * namely to reduce the likelihood of spurious old->new pointers.
1656 */
1657 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1658 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1659 freeChain_sync(cap->saved_mut_lists[g]);
1660 cap->saved_mut_lists[g] = NULL;
1661 }
1662 }
1663
1664 /* -----------------------------------------------------------------------------
1665 Scavenging the static objects.
1666
1667 We walk gct->static_objects, evacuating the pointers each object
1668 contains and moving it onto gct->scavenged_static_objects as we go.
1669 Static objects are always evacuated to the oldest generation.
1670 -------------------------------------------------------------------------- */
1671
1672 static void
1673 scavenge_static(void)
1674 {
1675 StgClosure* p;
1676 const StgInfoTable *info;
1677
1678 debugTrace(DEBUG_gc, "scavenging static objects");
1679
1680 /* Always evacuate straight to the oldest generation for static
1681 * objects */
1682 gct->evac_gen_no = oldest_gen->no;
1683
1684 /* keep going until we've scavenged all the objects on the linked
1685 list... */
1686
1687 while (1) {
1688
1689 /* get the next static object from the list. Remember, there might
1690 * be more stuff on this list after each evacuation...
1691 * (static_objects is a global)
1692 */
1693 p = gct->static_objects;
1694 if (p == END_OF_STATIC_LIST) {
1695 break;
1696 }
1697
1698 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1699 info = get_itbl(p);
1700 /*
1701 if (info->type==RBH)
1702 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1703 */
1704 // make sure the info pointer is into text space
1705
1706 /* Take this object *off* the static_objects list,
1707 * and put it on the scavenged_static_objects list.
1708 */
1709 gct->static_objects = *STATIC_LINK(info,p);
1710 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1711 gct->scavenged_static_objects = p;
1712
1713 switch (info -> type) {
1714
1715 case IND_STATIC:
1716 {
1717 StgInd *ind = (StgInd *)p;
1718 evacuate(&ind->indirectee);
1719
1720 /* might fail to evacuate it, in which case we have to pop it
1721 * back on the mutable list of the oldest generation. We
1722 * leave it *on* the scavenged_static_objects list, though,
1723 * in case we visit this object again.
1724 */
1725 if (gct->failed_to_evac) {
1726 gct->failed_to_evac = rtsFalse;
1727 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1728 }
1729 break;
1730 }
1731
1732 case THUNK_STATIC:
1733 scavenge_thunk_srt(info);
1734 break;
1735
1736 case FUN_STATIC:
1737 scavenge_fun_srt(info);
1738 break;
1739
1740 case CONSTR_STATIC:
1741 {
1742 StgPtr q, next;
1743
1744 next = (P_)p->payload + info->layout.payload.ptrs;
1745 // evacuate the pointers
1746 for (q = (P_)p->payload; q < next; q++) {
1747 evacuate((StgClosure **)q);
1748 }
1749 break;
1750 }
1751
1752 default:
1753 barf("scavenge_static: strange closure %d", (int)(info->type));
1754 }
1755
1756 ASSERT(gct->failed_to_evac == rtsFalse);
1757 }
1758 }
1759
1760 /* -----------------------------------------------------------------------------
1761 scavenge a chunk of memory described by a bitmap
1762 -------------------------------------------------------------------------- */
1763
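// As in scavenge_small_bitmap(), a 0 bit means "this word is a
// pointer: evacuate it"; the bitmap simply spans several words, each
// holding BITS_IN(W_) entries.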
1764 static void
1765 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
1766 {
1767 nat i, j, b;
1768 StgWord bitmap;
1769
1770 b = 0;
1771
1772 for (i = 0; i < size; b++) {
1773 bitmap = large_bitmap->bitmap[b];
1774 j = stg_min(size-i, BITS_IN(W_));
1775 i += j;
1776 for (; j > 0; j--, p++) {
1777 if ((bitmap & 1) == 0) {
1778 evacuate((StgClosure **)p);
1779 }
1780 bitmap = bitmap >> 1;
1781 }
1782 }
1783 }
1784
1785
1786 /* -----------------------------------------------------------------------------
1787 scavenge_stack walks over a section of stack and evacuates all the
1788 objects pointed to by it. We can use the same code for walking
1789 AP_STACK_UPDs, since these are just sections of copied stack.
1790 -------------------------------------------------------------------------- */
1791
1792 static void
1793 scavenge_stack(StgPtr p, StgPtr stack_end)
1794 {
1795 const StgRetInfoTable* info;
1796 StgWord bitmap;
1797 StgWord size;
1798
1799 /*
1800 * Each time around this loop, we are looking at a chunk of stack
1801 * that starts with an activation record.
1802 */
1803
1804 while (p < stack_end) {
1805 info = get_ret_itbl((StgClosure *)p);
1806
1807 switch (info->i.type) {
1808
1809 case UPDATE_FRAME:
1810 // Note [upd-black-hole]
1811 // In SMP, we can get update frames that point to indirections
1812 // when two threads evaluate the same thunk. We do attempt to
1813 // discover this situation in threadPaused(), but it's
1814 // possible that the following sequence occurs:
1815 //
1816 // A B
1817 // enter T
1818 // enter T
1819 // blackhole T
1820 // update T
1821 // GC
1822 //
1823 // Now T is an indirection, and the update frame is already
1824 // marked on A's stack, so we won't traverse it again in
1825 // threadPaused(). We could traverse the whole stack again
1826 // before GC, but that seems like overkill.
1827 //
1828 // Scavenging this update frame as normal would be disastrous;
1829 // the updatee would end up pointing to the value. So we
1830 // check whether the value after evacuation is a BLACKHOLE,
1831 // and if not, we change the update frame to an stg_enter
1832 // frame that simply returns the value. Hence, blackholing is
1833 // compulsory (otherwise we would have to check for thunks
1834 // too).
1835 //
1836 // One slight hiccup is that the THUNK_SELECTOR machinery can
1837 // overwrite the updatee with an IND. In parallel GC, this
1838 // could even be happening concurrently, so we can't check for
1839 // the IND. Fortunately if we assume that blackholing is
1840 // happening (either lazy or eager), then we can be sure that
1841 // the updatee is never a THUNK_SELECTOR and we're ok.
1842 // NB. this is a new invariant: blackholing is not optional.
1843 {
1844 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1845 StgClosure *v;
1846
1847 evacuate(&frame->updatee);
1848 v = frame->updatee;
1849 if (GET_CLOSURE_TAG(v) != 0 ||
1850 (get_itbl(v)->type != BLACKHOLE)) {
1851 // blackholing is compulsory, see above.
1852 frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
1853 }
1854 ASSERT(v->header.info != &stg_TSO_info);
1855 p += sizeofW(StgUpdateFrame);
1856 continue;
1857 }
1858
1859 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1860 case CATCH_STM_FRAME:
1861 case CATCH_RETRY_FRAME:
1862 case ATOMICALLY_FRAME:
1863 case UNDERFLOW_FRAME:
1864 case STOP_FRAME:
1865 case CATCH_FRAME:
1866 case RET_SMALL:
1867 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1868 size = BITMAP_SIZE(info->i.layout.bitmap);
1869 // NOTE: the payload starts immediately after the info-ptr; we
1870 // don't have an StgHeader in the same sense as a heap closure.
1871 p++;
1872 p = scavenge_small_bitmap(p, size, bitmap);
1873
1874 follow_srt:
1875 if (major_gc)
1876 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
1877 continue;
1878
1879 case RET_BCO: {
1880 StgBCO *bco;
1881 StgWord size;
1882
1883 p++;
1884 evacuate((StgClosure **)p);
1885 bco = (StgBCO *)*p;
1886 p++;
1887 size = BCO_BITMAP_SIZE(bco);
1888 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1889 p += size;
1890 continue;
1891 }
1892
1893 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1894 case RET_BIG:
1895 {
1896 StgWord size;
1897
1898 size = GET_LARGE_BITMAP(&info->i)->size;
1899 p++;
1900 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1901 p += size;
1902 // and don't forget to follow the SRT
1903 goto follow_srt;
1904 }
1905
1906 case RET_FUN:
1907 {
1908 StgRetFun *ret_fun = (StgRetFun *)p;
1909 StgFunInfoTable *fun_info;
1910
1911 evacuate(&ret_fun->fun);
1912 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1913 p = scavenge_arg_block(fun_info, ret_fun->payload);
1914 goto follow_srt;
1915 }
1916
1917 default:
1918 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1919 }
1920 }
1921 }
1922
1923 /*-----------------------------------------------------------------------------
1924 scavenge the large object list.
1925
1926 evac_gen_no is set by the caller; similar games are played with
1927 evac_gen_no as in scavenge_block() - see the comment at the top of
1928 scavenge_block(). Most large objects are (repeatedly) mutable, so
1929 most of the time evac_gen_no will be zero.
1930 --------------------------------------------------------------------------- */
1931
1932 static void
1933 scavenge_large (gen_workspace *ws)
1934 {
1935 bdescr *bd;
1936 StgPtr p;
1937
1938 gct->evac_gen_no = ws->gen->no;
1939
1940 bd = ws->todo_large_objects;
1941
1942 for (; bd != NULL; bd = ws->todo_large_objects) {
1943
1944 // take this object *off* the large objects list and put it on
1945 // the scavenged large objects list. This is so that we can
1946 // treat new_large_objects as a stack and push new objects on
1947 // the front when evacuating.
1948 ws->todo_large_objects = bd->link;
1949
1950 ACQUIRE_SPIN_LOCK(&ws->gen->sync);
1951 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
1952 ws->gen->n_scavenged_large_blocks += bd->blocks;
1953 RELEASE_SPIN_LOCK(&ws->gen->sync);
1954
1955 p = bd->start;
1956 if (scavenge_one(p)) {
1957 if (ws->gen->no > 0) {
1958 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
1959 }
1960 }
1961
1962 // stats
1963 gct->scanned += closure_sizeW((StgClosure*)p);
1964 }
1965 }
1966
1967 /* ----------------------------------------------------------------------------
1968 Look for work to do.
1969
1970 We look for the oldest gen that has either a todo block that can
1971 be scanned, or a block of work on the global queue that we can
1972 scan.
1973
1974 It is important to take work from the *oldest* generation that
1975 has work available, because that minimizes the likelihood of
1976 evacuating objects into a young generation when they should have
1977 been eagerly promoted. This really does make a difference (the
1978 cacheprof benchmark is one that is affected).
1979
1980 We also want to scan the todo block if possible before grabbing
1981 work from the global queue, the reason being that we don't want to
1982 steal work from the global queue and starve other threads if there
1983 is other work we can usefully be doing.
1984 ------------------------------------------------------------------------- */
1985
1986 static rtsBool
1987 scavenge_find_work (void)
1988 {
1989 int g;
1990 gen_workspace *ws;
1991 rtsBool did_something, did_anything;
1992 bdescr *bd;
1993
1994 gct->scav_find_work++;
1995
1996 did_anything = rtsFalse;
1997
1998 loop:
1999 did_something = rtsFalse;
2000 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2001 ws = &gct->gens[g];
2002
2003 gct->scan_bd = NULL;
2004
2005 // If we have a scan block with some work to do,
2006 // scavenge everything up to the free pointer.
2007 if (ws->todo_bd->u.scan < ws->todo_free)
2008 {
2009 scavenge_block(ws->todo_bd);
2010 did_something = rtsTrue;
2011 break;
2012 }
2013
2014 // If we have any large objects to scavenge, do them now.
2015 if (ws->todo_large_objects) {
2016 scavenge_large(ws);
2017 did_something = rtsTrue;
2018 break;
2019 }
2020
2021 if ((bd = grab_local_todo_block(ws)) != NULL) {
2022 scavenge_block(bd);
2023 did_something = rtsTrue;
2024 break;
2025 }
2026 }
2027
2028 if (did_something) {
2029 did_anything = rtsTrue;
2030 goto loop;
2031 }
2032
2033 #if defined(THREADED_RTS)
2034 if (work_stealing) {
2035 // look for work to steal
2036 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2037 if ((bd = steal_todo_block(g)) != NULL) {
2038 scavenge_block(bd);
2039 did_something = rtsTrue;
2040 break;
2041 }
2042 }
2043
2044 if (did_something) {
2045 did_anything = rtsTrue;
2046 goto loop;
2047 }
2048 }
2049 #endif
2050
2051 // only return when there is no more work to do
2052
2053 return did_anything;
2054 }
2055
2056 /* ----------------------------------------------------------------------------
2057 Scavenge until we can't find anything more to scavenge.
2058 ------------------------------------------------------------------------- */
2059
2060 void
2061 scavenge_loop(void)
2062 {
2063 rtsBool work_to_do;
2064
2065 loop:
2066 work_to_do = rtsFalse;
2067
2068 // scavenge static objects
2069 if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
2070 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
2071 scavenge_static();
2072 }
2073
2074 // scavenge objects in compacted generation
2075 if (mark_stack_bd != NULL && !mark_stack_empty()) {
2076 scavenge_mark_stack();
2077 work_to_do = rtsTrue;
2078 }
2079
2080 // Order is important here: we want to deal in full blocks as
2081 // much as possible, so go for global work in preference to
2082 // local work. Only if all the global work has been exhausted
2083 // do we start scavenging the fragments of blocks in the local
2084 // workspaces.
2085 if (scavenge_find_work()) goto loop;
2086
2087 if (work_to_do) goto loop;
2088 }