1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "Storage.h"
18 #include "GC.h"
19 #include "GCThread.h"
20 #include "GCUtils.h"
21 #include "Compact.h"
22 #include "MarkStack.h"
23 #include "Evac.h"
24 #include "Scav.h"
25 #include "Apply.h"
26 #include "Trace.h"
27 #include "Sanity.h"
28 #include "Capability.h"
29 #include "LdvProfile.h"
30
31 static void scavenge_stack (StgPtr p, StgPtr stack_end);
32
33 static void scavenge_large_bitmap (StgPtr p,
34 StgLargeBitmap *large_bitmap,
35 StgWord size );
36
37 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
38 # define evacuate(a) evacuate1(a)
39 # define scavenge_loop(a) scavenge_loop1(a)
40 # define scavenge_block(a) scavenge_block1(a)
41 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
42 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
43 #endif
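// Note: in a threaded build this file is effectively compiled twice, once
// with PARALLEL_GC (the copy built via Scav_thr.c) and once without.  The
// macros above give the single-threaded copy distinct "1"-suffixed entry
// points - e.g. scavenge_loop() becomes scavenge_loop1() - so both versions
// can be linked; callers elsewhere (e.g. in GC.c) pick whichever variant is
// appropriate for the number of GC threads in use.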
44
45 /* -----------------------------------------------------------------------------
46 Scavenge a TSO.
47 -------------------------------------------------------------------------- */
48
49 static void
50 scavengeTSO (StgTSO *tso)
51 {
52 rtsBool saved_eager;
53
54 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
55
56 // update the pointer from the InCall.
57 if (tso->bound != NULL) {
58 // NB. We can't just set tso->bound->tso = tso, because this
59 // might be an invalid copy of the TSO resulting from multiple
60 // threads evacuating the TSO simultaneously (see
61 // Evac.c:copy_tag()). Calling evacuate() on this pointer
62 // will ensure that we update it to point to the correct copy.
63 evacuate((StgClosure **)&tso->bound->tso);
64 }
65
66 saved_eager = gct->eager_promotion;
67 gct->eager_promotion = rtsFalse;
68
69 evacuate((StgClosure **)&tso->blocked_exceptions);
70 evacuate((StgClosure **)&tso->bq);
71
72 // scavenge the current transaction record
73 evacuate((StgClosure **)&tso->trec);
74
75 evacuate((StgClosure **)&tso->stackobj);
76
77 evacuate((StgClosure **)&tso->_link);
78 if ( tso->why_blocked == BlockedOnMVar
79 || tso->why_blocked == BlockedOnMVarRead
80 || tso->why_blocked == BlockedOnBlackHole
81 || tso->why_blocked == BlockedOnMsgThrowTo
82 || tso->why_blocked == NotBlocked
83 ) {
84 evacuate(&tso->block_info.closure);
85 }
86 #ifdef THREADED_RTS
87 // in the THREADED_RTS, block_info.closure must always point to a
88 // valid closure, because we assume this in throwTo(). In the
90 // non-threaded RTS it might be an FD (for
90 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
91 else {
92 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
93 }
94 #endif
95
96 tso->dirty = gct->failed_to_evac;
97
98 gct->eager_promotion = saved_eager;
99 }
100
101 /* -----------------------------------------------------------------------------
102 Mutable arrays of pointers
103 -------------------------------------------------------------------------- */
104
105 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
106 {
107 W_ m;
108 rtsBool any_failed;
109 StgPtr p, q;
110
111 any_failed = rtsFalse;
112 p = (StgPtr)&a->payload[0];
113 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
114 {
115 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
116 for (; p < q; p++) {
117 evacuate((StgClosure**)p);
118 }
119 if (gct->failed_to_evac) {
120 any_failed = rtsTrue;
121 *mutArrPtrsCard(a,m) = 1;
122 gct->failed_to_evac = rtsFalse;
123 } else {
124 *mutArrPtrsCard(a,m) = 0;
125 }
126 }
127
128 q = (StgPtr)&a->payload[a->ptrs];
129 if (p < q) {
130 for (; p < q; p++) {
131 evacuate((StgClosure**)p);
132 }
133 if (gct->failed_to_evac) {
134 any_failed = rtsTrue;
135 *mutArrPtrsCard(a,m) = 1;
136 gct->failed_to_evac = rtsFalse;
137 } else {
138 *mutArrPtrsCard(a,m) = 0;
139 }
140 }
141
142 gct->failed_to_evac = any_failed;
143 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
144 }
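// Illustration of the card scheme above (assuming the usual card size of
// 1 << MUT_ARR_PTRS_CARD_BITS == 128 elements): an array with a->ptrs == 300
// has mutArrPtrsCards(300) == 3 cards.  The first loop scavenges cards 0 and
// 1 (elements 0..127 and 128..255) in full, and the tail loop handles the
// partial card 2 (elements 256..299).  A card's byte is set to 1 only if some
// element in it still points into a younger generation, which is what lets
// later collections skip clean cards via scavenge_mut_arr_ptrs_marked()
// below.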
145
146 // scavenge only the marked areas of a MUT_ARR_PTRS
147 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
148 {
149 W_ m;
150 StgPtr p, q;
151 rtsBool any_failed;
152
153 any_failed = rtsFalse;
154 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
155 {
156 if (*mutArrPtrsCard(a,m) != 0) {
157 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
158 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
159 (StgPtr)&a->payload[a->ptrs]);
160 for (; p < q; p++) {
161 evacuate((StgClosure**)p);
162 }
163 if (gct->failed_to_evac) {
164 any_failed = rtsTrue;
165 gct->failed_to_evac = rtsFalse;
166 } else {
167 *mutArrPtrsCard(a,m) = 0;
168 }
169 }
170 }
171
172 gct->failed_to_evac = any_failed;
173 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
174 }
175
176 STATIC_INLINE StgPtr
177 scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
178 {
179 while (size > 0) {
180 if ((bitmap & 1) == 0) {
181 evacuate((StgClosure **)p);
182 }
183 p++;
184 bitmap = bitmap >> 1;
185 size--;
186 }
187 return p;
188 }
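// Bitmap convention used here (and for the stack frames, arg blocks and PAP
// payloads scavenged below): a *clear* bit marks a word that is a pointer and
// must be evacuated; a *set* bit marks a non-pointer word to skip.  For
// instance, size == 3 with bitmap == 0x2 (binary 010) means words 0 and 2 are
// evacuated and word 1 is left alone.  (SRT bitmaps, below, use the opposite
// sense: a set bit marks an entry to follow.)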
189
190 /* -----------------------------------------------------------------------------
191 Blocks of function args occur on the stack (at the top) and
192 in PAPs.
193 -------------------------------------------------------------------------- */
194
195 STATIC_INLINE StgPtr
196 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
197 {
198 StgPtr p;
199 StgWord bitmap;
200 StgWord size;
201
202 p = (StgPtr)args;
203 switch (fun_info->f.fun_type) {
204 case ARG_GEN:
205 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
206 size = BITMAP_SIZE(fun_info->f.b.bitmap);
207 goto small_bitmap;
208 case ARG_GEN_BIG:
209 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
210 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
211 p += size;
212 break;
213 default:
214 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
215 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
216 small_bitmap:
217 p = scavenge_small_bitmap(p, size, bitmap);
218 break;
219 }
220 return p;
221 }
222
223 STATIC_INLINE GNUC_ATTR_HOT StgPtr
224 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
225 {
226 StgPtr p;
227 StgWord bitmap;
228 StgFunInfoTable *fun_info;
229
230 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
231 ASSERT(fun_info->i.type != PAP);
232 p = (StgPtr)payload;
233
234 switch (fun_info->f.fun_type) {
235 case ARG_GEN:
236 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
237 goto small_bitmap;
238 case ARG_GEN_BIG:
239 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
240 p += size;
241 break;
242 case ARG_BCO:
243 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
244 p += size;
245 break;
246 default:
247 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
248 small_bitmap:
249 p = scavenge_small_bitmap(p, size, bitmap);
250 break;
251 }
252 return p;
253 }
254
255 STATIC_INLINE GNUC_ATTR_HOT StgPtr
256 scavenge_PAP (StgPAP *pap)
257 {
258 evacuate(&pap->fun);
259 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
260 }
261
262 STATIC_INLINE StgPtr
263 scavenge_AP (StgAP *ap)
264 {
265 evacuate(&ap->fun);
266 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
267 }
268
269 /* -----------------------------------------------------------------------------
270 Scavenge SRTs
271 -------------------------------------------------------------------------- */
272
273 /* Similar to scavenge_large_bitmap(), but we don't write back the
274 * pointers we get back from evacuate().
275 */
276 static void
277 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
278 {
279 nat i, j, size;
280 StgWord bitmap;
281 StgClosure **p;
282
283 size = (nat)large_srt->l.size;
284 p = (StgClosure **)large_srt->srt;
285
286 for (i = 0; i < size / BITS_IN(W_); i++) {
287 bitmap = large_srt->l.bitmap[i];
288 // skip zero words: bitmaps can be very sparse, and this helps
289 // performance a lot in some cases.
290 if (bitmap != 0) {
291 for (j = 0; j < BITS_IN(W_); j++) {
292 if ((bitmap & 1) != 0) {
293 evacuate(p);
294 }
295 p++;
296 bitmap = bitmap >> 1;
297 }
298 } else {
299 p += BITS_IN(W_);
300 }
301 }
302 if (size % BITS_IN(W_) != 0) {
303 bitmap = large_srt->l.bitmap[i];
304 for (j = 0; j < size % BITS_IN(W_); j++) {
305 if ((bitmap & 1) != 0) {
306 evacuate(p);
307 }
308 p++;
309 bitmap = bitmap >> 1;
310 }
311 }
312 }
313
314 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
315 * srt field in the info table. That's ok, because we'll
316 * never dereference it.
317 */
318 STATIC_INLINE GNUC_ATTR_HOT void
319 scavenge_srt (StgClosure **srt, nat srt_bitmap)
320 {
321 nat bitmap;
322 StgClosure **p;
323
324 bitmap = srt_bitmap;
325 p = srt;
326
327 if (bitmap == (StgHalfWord)(-1)) {
328 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
329 return;
330 }
331
332 while (bitmap != 0) {
333 if ((bitmap & 1) != 0) {
334 #if defined(COMPILING_WINDOWS_DLL)
335 // Special-case to handle references to closures hiding out in DLLs, since
336 // double indirections are required to get at those. The code generator knows
337 // which is which when generating the SRT, so it stores the (indirect)
338 // reference to the DLL closure in the table by first adding one to it.
339 // We check for this here, and undo the addition before evacuating it.
340 //
341 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
342 // closure that's fixed at link-time, and no extra magic is required.
343 if ( (W_)(*srt) & 0x1 ) {
344 evacuate( (StgClosure**) ((W_) (*srt) & ~0x1));
345 } else {
346 evacuate(p);
347 }
348 #else
349 evacuate(p);
350 #endif
351 }
352 p++;
353 bitmap = bitmap >> 1;
354 }
355 }
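// Example of the encoding handled above: srt_bitmap is a half-word taken from
// the info table; a value of 0x5 means SRT entries 0 and 2 are live and get
// evacuated.  The all-ones value ((StgHalfWord)(-1)) is a sentinel meaning the
// srt field really points to an StgLargeSRT, which carries its own word-array
// bitmap and is handled by scavenge_large_srt_bitmap() above.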
356
357
358 STATIC_INLINE GNUC_ATTR_HOT void
359 scavenge_thunk_srt(const StgInfoTable *info)
360 {
361 StgThunkInfoTable *thunk_info;
362 nat bitmap;
363
364 if (!major_gc) return;
365
366 thunk_info = itbl_to_thunk_itbl(info);
367 bitmap = thunk_info->i.srt_bitmap;
368 if (bitmap) {
369 // don't read srt_offset if bitmap==0, because it doesn't exist
370 // and so the memory might not be readable.
371 scavenge_srt((StgClosure **)GET_SRT(thunk_info), bitmap);
372 }
373 }
374
375 STATIC_INLINE GNUC_ATTR_HOT void
376 scavenge_fun_srt(const StgInfoTable *info)
377 {
378 StgFunInfoTable *fun_info;
379 nat bitmap;
380
381 if (!major_gc) return;
382
383 fun_info = itbl_to_fun_itbl(info);
384 bitmap = fun_info->i.srt_bitmap;
385 if (bitmap) {
386 // don't read srt_offset if bitmap==0, because it doesn't exist
387 // and so the memory might not be readable.
388 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), bitmap);
389 }
390 }
391
392 /* -----------------------------------------------------------------------------
393 Scavenge a block from the given scan pointer up to bd->free.
394
395 evac_gen_no is set to the generation of the block being scavenged:
396 objects reachable from this block are evacuated into that generation
397 (or an older one) where possible.
398 
399 We temporarily turn off eager promotion (gct->eager_promotion) when
400 scavenging certain mutable objects, for which eagerly promoting the
401 objects they point to isn't such a good idea.
402 -------------------------------------------------------------------------- */
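// A concrete (illustrative) scenario: during a collection of generation 0, a
// block that was copied into generation 1 is scavenged with evac_gen_no == 1,
// so objects it points to are normally pulled up into generation 1 as well.
// If something it points to nevertheless ends up in a younger generation,
// failed_to_evac is set and the object is recorded on generation 1's mutable
// list (see the end of the loop below), so the old-to-new pointer will be
// found by the next GC.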
403
404 static GNUC_ATTR_HOT void
405 scavenge_block (bdescr *bd)
406 {
407 StgPtr p, q;
408 StgInfoTable *info;
409 rtsBool saved_eager_promotion;
410 gen_workspace *ws;
411
412 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
413 bd->start, bd->gen_no, bd->u.scan);
414
415 gct->scan_bd = bd;
416 gct->evac_gen_no = bd->gen_no;
417 saved_eager_promotion = gct->eager_promotion;
418 gct->failed_to_evac = rtsFalse;
419
420 ws = &gct->gens[bd->gen->no];
421
422 p = bd->u.scan;
423
424 // we might be evacuating into the very block that we're
425 // scavenging, so we have to check the real bd->free pointer each
426 // time around the loop.
427 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
428
429 ASSERT(bd->link == NULL);
430 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
431 info = get_itbl((StgClosure *)p);
432
433 ASSERT(gct->thunk_selector_depth == 0);
434
435 q = p;
436 switch (info->type) {
437
438 case MVAR_CLEAN:
439 case MVAR_DIRTY:
440 {
441 StgMVar *mvar = ((StgMVar *)p);
442 gct->eager_promotion = rtsFalse;
443 evacuate((StgClosure **)&mvar->head);
444 evacuate((StgClosure **)&mvar->tail);
445 evacuate((StgClosure **)&mvar->value);
446 gct->eager_promotion = saved_eager_promotion;
447
448 if (gct->failed_to_evac) {
449 mvar->header.info = &stg_MVAR_DIRTY_info;
450 } else {
451 mvar->header.info = &stg_MVAR_CLEAN_info;
452 }
453 p += sizeofW(StgMVar);
454 break;
455 }
456
457 case TVAR:
458 {
459 StgTVar *tvar = ((StgTVar *)p);
460 gct->eager_promotion = rtsFalse;
461 evacuate((StgClosure **)&tvar->current_value);
462 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
463 gct->eager_promotion = saved_eager_promotion;
464
465 if (gct->failed_to_evac) {
466 tvar->header.info = &stg_TVAR_DIRTY_info;
467 } else {
468 tvar->header.info = &stg_TVAR_CLEAN_info;
469 }
470 p += sizeofW(StgTVar);
471 break;
472 }
473
474 case FUN_2_0:
475 scavenge_fun_srt(info);
476 evacuate(&((StgClosure *)p)->payload[1]);
477 evacuate(&((StgClosure *)p)->payload[0]);
478 p += sizeofW(StgHeader) + 2;
479 break;
480
481 case THUNK_2_0:
482 scavenge_thunk_srt(info);
483 evacuate(&((StgThunk *)p)->payload[1]);
484 evacuate(&((StgThunk *)p)->payload[0]);
485 p += sizeofW(StgThunk) + 2;
486 break;
487
488 case CONSTR_2_0:
489 evacuate(&((StgClosure *)p)->payload[1]);
490 evacuate(&((StgClosure *)p)->payload[0]);
491 p += sizeofW(StgHeader) + 2;
492 break;
493
494 case THUNK_1_0:
495 scavenge_thunk_srt(info);
496 evacuate(&((StgThunk *)p)->payload[0]);
497 p += sizeofW(StgThunk) + 1;
498 break;
499
500 case FUN_1_0:
501 scavenge_fun_srt(info);
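// fall through: CONSTR_1_0 shares the payload scavenging below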
502 case CONSTR_1_0:
503 evacuate(&((StgClosure *)p)->payload[0]);
504 p += sizeofW(StgHeader) + 1;
505 break;
506
507 case THUNK_0_1:
508 scavenge_thunk_srt(info);
509 p += sizeofW(StgThunk) + 1;
510 break;
511
512 case FUN_0_1:
513 scavenge_fun_srt(info);
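// fall through: CONSTR_0_1 has no pointers, just advance p the same way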
514 case CONSTR_0_1:
515 p += sizeofW(StgHeader) + 1;
516 break;
517
518 case THUNK_0_2:
519 scavenge_thunk_srt(info);
520 p += sizeofW(StgThunk) + 2;
521 break;
522
523 case FUN_0_2:
524 scavenge_fun_srt(info);
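// fall through: CONSTR_0_2 has no pointers, just advance p the same way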
525 case CONSTR_0_2:
526 p += sizeofW(StgHeader) + 2;
527 break;
528
529 case THUNK_1_1:
530 scavenge_thunk_srt(info);
531 evacuate(&((StgThunk *)p)->payload[0]);
532 p += sizeofW(StgThunk) + 2;
533 break;
534
535 case FUN_1_1:
536 scavenge_fun_srt(info);
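// fall through: CONSTR_1_1 shares the payload scavenging below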
537 case CONSTR_1_1:
538 evacuate(&((StgClosure *)p)->payload[0]);
539 p += sizeofW(StgHeader) + 2;
540 break;
541
542 case FUN:
543 scavenge_fun_srt(info);
544 goto gen_obj;
545
546 case THUNK:
547 {
548 StgPtr end;
549
550 scavenge_thunk_srt(info);
551 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
552 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
553 evacuate((StgClosure **)p);
554 }
555 p += info->layout.payload.nptrs;
556 break;
557 }
558
559 gen_obj:
560 case CONSTR:
561 case WEAK:
562 case PRIM:
563 {
564 StgPtr end;
565
566 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
567 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
568 evacuate((StgClosure **)p);
569 }
570 p += info->layout.payload.nptrs;
571 break;
572 }
573
574 case BCO: {
575 StgBCO *bco = (StgBCO *)p;
576 evacuate((StgClosure **)&bco->instrs);
577 evacuate((StgClosure **)&bco->literals);
578 evacuate((StgClosure **)&bco->ptrs);
579 p += bco_sizeW(bco);
580 break;
581 }
582
583 case BLACKHOLE:
584 evacuate(&((StgInd *)p)->indirectee);
585 p += sizeofW(StgInd);
586 break;
587
588 case MUT_VAR_CLEAN:
589 case MUT_VAR_DIRTY:
590 gct->eager_promotion = rtsFalse;
591 evacuate(&((StgMutVar *)p)->var);
592 gct->eager_promotion = saved_eager_promotion;
593
594 if (gct->failed_to_evac) {
595 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
596 } else {
597 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
598 }
599 p += sizeofW(StgMutVar);
600 break;
601
602 case BLOCKING_QUEUE:
603 {
604 StgBlockingQueue *bq = (StgBlockingQueue *)p;
605
606 gct->eager_promotion = rtsFalse;
607 evacuate(&bq->bh);
608 evacuate((StgClosure**)&bq->owner);
609 evacuate((StgClosure**)&bq->queue);
610 evacuate((StgClosure**)&bq->link);
611 gct->eager_promotion = saved_eager_promotion;
612
613 if (gct->failed_to_evac) {
614 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
615 } else {
616 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
617 }
618 p += sizeofW(StgBlockingQueue);
619 break;
620 }
621
622 case THUNK_SELECTOR:
623 {
624 StgSelector *s = (StgSelector *)p;
625 evacuate(&s->selectee);
626 p += THUNK_SELECTOR_sizeW();
627 break;
628 }
629
630 // A chunk of stack saved in a heap object
631 case AP_STACK:
632 {
633 StgAP_STACK *ap = (StgAP_STACK *)p;
634
635 evacuate(&ap->fun);
636 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
637 p = (StgPtr)ap->payload + ap->size;
638 break;
639 }
640
641 case PAP:
642 p = scavenge_PAP((StgPAP *)p);
643 break;
644
645 case AP:
646 p = scavenge_AP((StgAP *)p);
647 break;
648
649 case ARR_WORDS:
650 // nothing to follow
651 p += arr_words_sizeW((StgArrBytes *)p);
652 break;
653
654 case MUT_ARR_PTRS_CLEAN:
655 case MUT_ARR_PTRS_DIRTY:
656 {
657 // We don't eagerly promote objects pointed to by a mutable
658 // array, but if we find the array only points to objects in
659 // the same or an older generation, we mark it "clean" and
660 // avoid traversing it during minor GCs.
661 gct->eager_promotion = rtsFalse;
662
663 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
664
665 if (gct->failed_to_evac) {
666 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
667 } else {
668 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
669 }
670
671 gct->eager_promotion = saved_eager_promotion;
672 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
673 break;
674 }
675
676 case MUT_ARR_PTRS_FROZEN:
677 case MUT_ARR_PTRS_FROZEN0:
678 // follow everything
679 {
680 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
681
682 // If we're going to put this object on the mutable list, then
683 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
684 if (gct->failed_to_evac) {
685 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
686 } else {
687 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
688 }
689 break;
690 }
691
692 case SMALL_MUT_ARR_PTRS_CLEAN:
693 case SMALL_MUT_ARR_PTRS_DIRTY:
694 // follow everything
695 {
696 StgPtr next;
697
698 // We don't eagerly promote objects pointed to by a mutable
699 // array, but if we find the array only points to objects in
700 // the same or an older generation, we mark it "clean" and
701 // avoid traversing it during minor GCs.
702 gct->eager_promotion = rtsFalse;
703 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
704 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
705 evacuate((StgClosure **)p);
706 }
707 gct->eager_promotion = saved_eager_promotion;
708
709 if (gct->failed_to_evac) {
710 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
711 } else {
712 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
713 }
714
715 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
716 break;
717 }
718
719 case SMALL_MUT_ARR_PTRS_FROZEN:
720 case SMALL_MUT_ARR_PTRS_FROZEN0:
721 // follow everything
722 {
723 StgPtr next;
724
725 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
726 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
727 evacuate((StgClosure **)p);
728 }
729
730 // If we're going to put this object on the mutable list, then
731 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
732 if (gct->failed_to_evac) {
733 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
734 } else {
735 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
736 }
737 break;
738 }
739
740 case TSO:
741 {
742 scavengeTSO((StgTSO *)p);
743 p += sizeofW(StgTSO);
744 break;
745 }
746
747 case STACK:
748 {
749 StgStack *stack = (StgStack*)p;
750
751 gct->eager_promotion = rtsFalse;
752
753 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
754 stack->dirty = gct->failed_to_evac;
755 p += stack_sizeW(stack);
756
757 gct->eager_promotion = saved_eager_promotion;
758 break;
759 }
760
761 case MUT_PRIM:
762 {
763 StgPtr end;
764
765 gct->eager_promotion = rtsFalse;
766
767 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
768 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
769 evacuate((StgClosure **)p);
770 }
771 p += info->layout.payload.nptrs;
772
773 gct->eager_promotion = saved_eager_promotion;
774 gct->failed_to_evac = rtsTrue; // mutable
775 break;
776 }
777
778 case TREC_CHUNK:
779 {
780 StgWord i;
781 StgTRecChunk *tc = ((StgTRecChunk *) p);
782 TRecEntry *e = &(tc -> entries[0]);
783 gct->eager_promotion = rtsFalse;
784 evacuate((StgClosure **)&tc->prev_chunk);
785 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
786 evacuate((StgClosure **)&e->tvar);
787 evacuate((StgClosure **)&e->expected_value);
788 evacuate((StgClosure **)&e->new_value);
789 }
790 gct->eager_promotion = saved_eager_promotion;
791 gct->failed_to_evac = rtsTrue; // mutable
792 p += sizeofW(StgTRecChunk);
793 break;
794 }
795
796 default:
797 barf("scavenge: unimplemented/strange closure type %d @ %p",
798 info->type, p);
799 }
800
801 /*
802 * We need to record the current object on the mutable list if
803 * (a) It is actually mutable, or
804 * (b) It contains pointers to a younger generation.
805 * Case (b) arises if we didn't manage to promote everything that
806 * the current object points to into the current generation.
807 */
808 if (gct->failed_to_evac) {
809 gct->failed_to_evac = rtsFalse;
810 if (bd->gen_no > 0) {
811 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
812 }
813 }
814 }
815
816 if (p > bd->free) {
817 gct->copied += ws->todo_free - bd->free;
818 bd->free = p;
819 }
820
821 debugTrace(DEBUG_gc, " scavenged %ld bytes",
822 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
823
824 // update stats: this is a block that has been scavenged
825 gct->scanned += bd->free - bd->u.scan;
826 bd->u.scan = bd->free;
827
828 if (bd != ws->todo_bd) {
829 // we're not going to evac any more objects into
830 // this block, so push it now.
831 push_scanned_block(bd, ws);
832 }
833
834 gct->scan_bd = NULL;
835 }
836 /* -----------------------------------------------------------------------------
837 Scavenge everything on the mark stack.
838
839 This is slightly different from scavenge_block():
840 - we don't walk linearly through the objects, so the scavenger
841 doesn't need to advance the pointer on to the next object.
842 -------------------------------------------------------------------------- */
843
844 static void
845 scavenge_mark_stack(void)
846 {
847 StgPtr p, q;
848 StgInfoTable *info;
849 rtsBool saved_eager_promotion;
850
851 gct->evac_gen_no = oldest_gen->no;
852 saved_eager_promotion = gct->eager_promotion;
853
854 while ((p = pop_mark_stack())) {
855
856 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
857 info = get_itbl((StgClosure *)p);
858
859 q = p;
860 switch (info->type) {
861
862 case MVAR_CLEAN:
863 case MVAR_DIRTY:
864 {
865 StgMVar *mvar = ((StgMVar *)p);
866 gct->eager_promotion = rtsFalse;
867 evacuate((StgClosure **)&mvar->head);
868 evacuate((StgClosure **)&mvar->tail);
869 evacuate((StgClosure **)&mvar->value);
870 gct->eager_promotion = saved_eager_promotion;
871
872 if (gct->failed_to_evac) {
873 mvar->header.info = &stg_MVAR_DIRTY_info;
874 } else {
875 mvar->header.info = &stg_MVAR_CLEAN_info;
876 }
877 break;
878 }
879
880 case TVAR:
881 {
882 StgTVar *tvar = ((StgTVar *)p);
883 gct->eager_promotion = rtsFalse;
884 evacuate((StgClosure **)&tvar->current_value);
885 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
886 gct->eager_promotion = saved_eager_promotion;
887
888 if (gct->failed_to_evac) {
889 tvar->header.info = &stg_TVAR_DIRTY_info;
890 } else {
891 tvar->header.info = &stg_TVAR_CLEAN_info;
892 }
893 break;
894 }
895
896 case FUN_2_0:
897 scavenge_fun_srt(info);
898 evacuate(&((StgClosure *)p)->payload[1]);
899 evacuate(&((StgClosure *)p)->payload[0]);
900 break;
901
902 case THUNK_2_0:
903 scavenge_thunk_srt(info);
904 evacuate(&((StgThunk *)p)->payload[1]);
905 evacuate(&((StgThunk *)p)->payload[0]);
906 break;
907
908 case CONSTR_2_0:
909 evacuate(&((StgClosure *)p)->payload[1]);
910 evacuate(&((StgClosure *)p)->payload[0]);
911 break;
912
913 case FUN_1_0:
914 case FUN_1_1:
915 scavenge_fun_srt(info);
916 evacuate(&((StgClosure *)p)->payload[0]);
917 break;
918
919 case THUNK_1_0:
920 case THUNK_1_1:
921 scavenge_thunk_srt(info);
922 evacuate(&((StgThunk *)p)->payload[0]);
923 break;
924
925 case CONSTR_1_0:
926 case CONSTR_1_1:
927 evacuate(&((StgClosure *)p)->payload[0]);
928 break;
929
930 case FUN_0_1:
931 case FUN_0_2:
932 scavenge_fun_srt(info);
933 break;
934
935 case THUNK_0_1:
936 case THUNK_0_2:
937 scavenge_thunk_srt(info);
938 break;
939
940 case CONSTR_0_1:
941 case CONSTR_0_2:
942 break;
943
944 case FUN:
945 scavenge_fun_srt(info);
946 goto gen_obj;
947
948 case THUNK:
949 {
950 StgPtr end;
951
952 scavenge_thunk_srt(info);
953 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
954 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
955 evacuate((StgClosure **)p);
956 }
957 break;
958 }
959
960 gen_obj:
961 case CONSTR:
962 case WEAK:
963 case PRIM:
964 {
965 StgPtr end;
966
967 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
968 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
969 evacuate((StgClosure **)p);
970 }
971 break;
972 }
973
974 case BCO: {
975 StgBCO *bco = (StgBCO *)p;
976 evacuate((StgClosure **)&bco->instrs);
977 evacuate((StgClosure **)&bco->literals);
978 evacuate((StgClosure **)&bco->ptrs);
979 break;
980 }
981
982 case IND:
983 case BLACKHOLE:
984 evacuate(&((StgInd *)p)->indirectee);
985 break;
986
987 case MUT_VAR_CLEAN:
988 case MUT_VAR_DIRTY: {
989 gct->eager_promotion = rtsFalse;
990 evacuate(&((StgMutVar *)p)->var);
991 gct->eager_promotion = saved_eager_promotion;
992
993 if (gct->failed_to_evac) {
994 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
995 } else {
996 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
997 }
998 break;
999 }
1000
1001 case BLOCKING_QUEUE:
1002 {
1003 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1004
1005 gct->eager_promotion = rtsFalse;
1006 evacuate(&bq->bh);
1007 evacuate((StgClosure**)&bq->owner);
1008 evacuate((StgClosure**)&bq->queue);
1009 evacuate((StgClosure**)&bq->link);
1010 gct->eager_promotion = saved_eager_promotion;
1011
1012 if (gct->failed_to_evac) {
1013 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1014 } else {
1015 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1016 }
1017 break;
1018 }
1019
1020 case ARR_WORDS:
1021 break;
1022
1023 case THUNK_SELECTOR:
1024 {
1025 StgSelector *s = (StgSelector *)p;
1026 evacuate(&s->selectee);
1027 break;
1028 }
1029
1030 // A chunk of stack saved in a heap object
1031 case AP_STACK:
1032 {
1033 StgAP_STACK *ap = (StgAP_STACK *)p;
1034
1035 evacuate(&ap->fun);
1036 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1037 break;
1038 }
1039
1040 case PAP:
1041 scavenge_PAP((StgPAP *)p);
1042 break;
1043
1044 case AP:
1045 scavenge_AP((StgAP *)p);
1046 break;
1047
1048 case MUT_ARR_PTRS_CLEAN:
1049 case MUT_ARR_PTRS_DIRTY:
1050 // follow everything
1051 {
1052 // We don't eagerly promote objects pointed to by a mutable
1053 // array, but if we find the array only points to objects in
1054 // the same or an older generation, we mark it "clean" and
1055 // avoid traversing it during minor GCs.
1056 gct->eager_promotion = rtsFalse;
1057
1058 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1059
1060 if (gct->failed_to_evac) {
1061 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1062 } else {
1063 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1064 }
1065
1066 gct->eager_promotion = saved_eager_promotion;
1067 gct->failed_to_evac = rtsTrue; // mutable anyhow.
1068 break;
1069 }
1070
1071 case MUT_ARR_PTRS_FROZEN:
1072 case MUT_ARR_PTRS_FROZEN0:
1073 // follow everything
1074 {
1075 StgPtr q = p;
1076
1077 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1078
1079 // If we're going to put this object on the mutable list, then
1080 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1081 if (gct->failed_to_evac) {
1082 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1083 } else {
1084 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1085 }
1086 break;
1087 }
1088
1089 case SMALL_MUT_ARR_PTRS_CLEAN:
1090 case SMALL_MUT_ARR_PTRS_DIRTY:
1091 // follow everything
1092 {
1093 StgPtr next;
1094 rtsBool saved_eager;
1095
1096 // We don't eagerly promote objects pointed to by a mutable
1097 // array, but if we find the array only points to objects in
1098 // the same or an older generation, we mark it "clean" and
1099 // avoid traversing it during minor GCs.
1100 saved_eager = gct->eager_promotion;
1101 gct->eager_promotion = rtsFalse;
1102 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1103 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1104 evacuate((StgClosure **)p);
1105 }
1106 gct->eager_promotion = saved_eager;
1107
1108 if (gct->failed_to_evac) {
1109 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1110 } else {
1111 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1112 }
1113
1114 gct->failed_to_evac = rtsTrue; // mutable anyhow.
1115 break;
1116 }
1117
1118 case SMALL_MUT_ARR_PTRS_FROZEN:
1119 case SMALL_MUT_ARR_PTRS_FROZEN0:
1120 // follow everything
1121 {
1122 StgPtr next, q = p;
1123
1124 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1125 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1126 evacuate((StgClosure **)p);
1127 }
1128
1129 // If we're going to put this object on the mutable list, then
1130 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1131 if (gct->failed_to_evac) {
1132 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1133 } else {
1134 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1135 }
1136 break;
1137 }
1138
1139 case TSO:
1140 {
1141 scavengeTSO((StgTSO*)p);
1142 break;
1143 }
1144
1145 case STACK:
1146 {
1147 StgStack *stack = (StgStack*)p;
1148
1149 gct->eager_promotion = rtsFalse;
1150
1151 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1152 stack->dirty = gct->failed_to_evac;
1153
1154 gct->eager_promotion = saved_eager_promotion;
1155 break;
1156 }
1157
1158 case MUT_PRIM:
1159 {
1160 StgPtr end;
1161
1162 gct->eager_promotion = rtsFalse;
1163
1164 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1165 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1166 evacuate((StgClosure **)p);
1167 }
1168
1169 gct->eager_promotion = saved_eager_promotion;
1170 gct->failed_to_evac = rtsTrue; // mutable
1171 break;
1172 }
1173
1174 case TREC_CHUNK:
1175 {
1176 StgWord i;
1177 StgTRecChunk *tc = ((StgTRecChunk *) p);
1178 TRecEntry *e = &(tc -> entries[0]);
1179 gct->eager_promotion = rtsFalse;
1180 evacuate((StgClosure **)&tc->prev_chunk);
1181 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1182 evacuate((StgClosure **)&e->tvar);
1183 evacuate((StgClosure **)&e->expected_value);
1184 evacuate((StgClosure **)&e->new_value);
1185 }
1186 gct->eager_promotion = saved_eager_promotion;
1187 gct->failed_to_evac = rtsTrue; // mutable
1188 break;
1189 }
1190
1191 default:
1192 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1193 info->type, p);
1194 }
1195
1196 if (gct->failed_to_evac) {
1197 gct->failed_to_evac = rtsFalse;
1198 if (gct->evac_gen_no) {
1199 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1200 }
1201 }
1202 } // while (p = pop_mark_stack())
1203 }
1204
1205 /* -----------------------------------------------------------------------------
1206 Scavenge one object.
1207
1208 This is used for objects that are temporarily marked as mutable
1209 because they contain old-to-new generation pointers. Only certain
1210 objects can have this property.
1211 -------------------------------------------------------------------------- */
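// For example, an old-generation MUT_VAR that has been written with a pointer
// to a young-generation object sits on its generation's mutable list;
// scavenge_one() evacuates the referent and returns rtsTrue if it still lives
// in a younger generation afterwards, in which case the caller
// (scavenge_mutable_list() below) keeps the MUT_VAR on the list.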
1212
1213 static rtsBool
1214 scavenge_one(StgPtr p)
1215 {
1216 const StgInfoTable *info;
1217 rtsBool no_luck;
1218 rtsBool saved_eager_promotion;
1219
1220 saved_eager_promotion = gct->eager_promotion;
1221
1222 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1223 info = get_itbl((StgClosure *)p);
1224
1225 switch (info->type) {
1226
1227 case MVAR_CLEAN:
1228 case MVAR_DIRTY:
1229 {
1230 StgMVar *mvar = ((StgMVar *)p);
1231 gct->eager_promotion = rtsFalse;
1232 evacuate((StgClosure **)&mvar->head);
1233 evacuate((StgClosure **)&mvar->tail);
1234 evacuate((StgClosure **)&mvar->value);
1235 gct->eager_promotion = saved_eager_promotion;
1236
1237 if (gct->failed_to_evac) {
1238 mvar->header.info = &stg_MVAR_DIRTY_info;
1239 } else {
1240 mvar->header.info = &stg_MVAR_CLEAN_info;
1241 }
1242 break;
1243 }
1244
1245 case TVAR:
1246 {
1247 StgTVar *tvar = ((StgTVar *)p);
1248 gct->eager_promotion = rtsFalse;
1249 evacuate((StgClosure **)&tvar->current_value);
1250 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1251 gct->eager_promotion = saved_eager_promotion;
1252
1253 if (gct->failed_to_evac) {
1254 tvar->header.info = &stg_TVAR_DIRTY_info;
1255 } else {
1256 tvar->header.info = &stg_TVAR_CLEAN_info;
1257 }
1258 break;
1259 }
1260
1261 case THUNK:
1262 case THUNK_1_0:
1263 case THUNK_0_1:
1264 case THUNK_1_1:
1265 case THUNK_0_2:
1266 case THUNK_2_0:
1267 {
1268 StgPtr q, end;
1269
1270 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1271 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1272 evacuate((StgClosure **)q);
1273 }
1274 break;
1275 }
1276
1277 case FUN:
1278 case FUN_1_0: // hardly worth specialising these guys
1279 case FUN_0_1:
1280 case FUN_1_1:
1281 case FUN_0_2:
1282 case FUN_2_0:
1283 case CONSTR:
1284 case CONSTR_1_0:
1285 case CONSTR_0_1:
1286 case CONSTR_1_1:
1287 case CONSTR_0_2:
1288 case CONSTR_2_0:
1289 case WEAK:
1290 case PRIM:
1291 {
1292 StgPtr q, end;
1293
1294 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1295 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1296 evacuate((StgClosure **)q);
1297 }
1298 break;
1299 }
1300
1301 case MUT_VAR_CLEAN:
1302 case MUT_VAR_DIRTY: {
1303 StgPtr q = p;
1304
1305 gct->eager_promotion = rtsFalse;
1306 evacuate(&((StgMutVar *)p)->var);
1307 gct->eager_promotion = saved_eager_promotion;
1308
1309 if (gct->failed_to_evac) {
1310 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1311 } else {
1312 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1313 }
1314 break;
1315 }
1316
1317 case BLOCKING_QUEUE:
1318 {
1319 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1320
1321 gct->eager_promotion = rtsFalse;
1322 evacuate(&bq->bh);
1323 evacuate((StgClosure**)&bq->owner);
1324 evacuate((StgClosure**)&bq->queue);
1325 evacuate((StgClosure**)&bq->link);
1326 gct->eager_promotion = saved_eager_promotion;
1327
1328 if (gct->failed_to_evac) {
1329 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1330 } else {
1331 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1332 }
1333 break;
1334 }
1335
1336 case THUNK_SELECTOR:
1337 {
1338 StgSelector *s = (StgSelector *)p;
1339 evacuate(&s->selectee);
1340 break;
1341 }
1342
1343 case AP_STACK:
1344 {
1345 StgAP_STACK *ap = (StgAP_STACK *)p;
1346
1347 evacuate(&ap->fun);
1348 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1349 p = (StgPtr)ap->payload + ap->size;
1350 break;
1351 }
1352
1353 case PAP:
1354 p = scavenge_PAP((StgPAP *)p);
1355 break;
1356
1357 case AP:
1358 p = scavenge_AP((StgAP *)p);
1359 break;
1360
1361 case ARR_WORDS:
1362 // nothing to follow
1363 break;
1364
1365 case MUT_ARR_PTRS_CLEAN:
1366 case MUT_ARR_PTRS_DIRTY:
1367 {
1368 // We don't eagerly promote objects pointed to by a mutable
1369 // array, but if we find the array only points to objects in
1370 // the same or an older generation, we mark it "clean" and
1371 // avoid traversing it during minor GCs.
1372 gct->eager_promotion = rtsFalse;
1373
1374 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1375
1376 if (gct->failed_to_evac) {
1377 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1378 } else {
1379 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1380 }
1381
1382 gct->eager_promotion = saved_eager_promotion;
1383 gct->failed_to_evac = rtsTrue;
1384 break;
1385 }
1386
1387 case MUT_ARR_PTRS_FROZEN:
1388 case MUT_ARR_PTRS_FROZEN0:
1389 {
1390 // follow everything
1391 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1392
1393 // If we're going to put this object on the mutable list, then
1394 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1395 if (gct->failed_to_evac) {
1396 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1397 } else {
1398 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1399 }
1400 break;
1401 }
1402
1403 case SMALL_MUT_ARR_PTRS_CLEAN:
1404 case SMALL_MUT_ARR_PTRS_DIRTY:
1405 {
1406 StgPtr next, q;
1407 rtsBool saved_eager;
1408
1409 // We don't eagerly promote objects pointed to by a mutable
1410 // array, but if we find the array only points to objects in
1411 // the same or an older generation, we mark it "clean" and
1412 // avoid traversing it during minor GCs.
1413 saved_eager = gct->eager_promotion;
1414 gct->eager_promotion = rtsFalse;
1415 q = p;
1416 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1417 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1418 evacuate((StgClosure **)p);
1419 }
1420 gct->eager_promotion = saved_eager;
1421
1422 if (gct->failed_to_evac) {
1423 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1424 } else {
1425 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1426 }
1427
1428 gct->failed_to_evac = rtsTrue;
1429 break;
1430 }
1431
1432 case SMALL_MUT_ARR_PTRS_FROZEN:
1433 case SMALL_MUT_ARR_PTRS_FROZEN0:
1434 {
1435 // follow everything
1436 StgPtr next, q=p;
1437
1438 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1439 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1440 evacuate((StgClosure **)p);
1441 }
1442
1443 // If we're going to put this object on the mutable list, then
1444 // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
1445 if (gct->failed_to_evac) {
1446 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
1447 } else {
1448 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
1449 }
1450 break;
1451 }
1452
1453 case TSO:
1454 {
1455 scavengeTSO((StgTSO*)p);
1456 break;
1457 }
1458
1459 case STACK:
1460 {
1461 StgStack *stack = (StgStack*)p;
1462
1463 gct->eager_promotion = rtsFalse;
1464
1465 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1466 stack->dirty = gct->failed_to_evac;
1467
1468 gct->eager_promotion = saved_eager_promotion;
1469 break;
1470 }
1471
1472 case MUT_PRIM:
1473 {
1474 StgPtr end;
1475
1476 gct->eager_promotion = rtsFalse;
1477
1478 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1479 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1480 evacuate((StgClosure **)p);
1481 }
1482
1483 gct->eager_promotion = saved_eager_promotion;
1484 gct->failed_to_evac = rtsTrue; // mutable
1485 break;
1486
1487 }
1488
1489 case TREC_CHUNK:
1490 {
1491 StgWord i;
1492 StgTRecChunk *tc = ((StgTRecChunk *) p);
1493 TRecEntry *e = &(tc -> entries[0]);
1494 gct->eager_promotion = rtsFalse;
1495 evacuate((StgClosure **)&tc->prev_chunk);
1496 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1497 evacuate((StgClosure **)&e->tvar);
1498 evacuate((StgClosure **)&e->expected_value);
1499 evacuate((StgClosure **)&e->new_value);
1500 }
1501 gct->eager_promotion = saved_eager_promotion;
1502 gct->failed_to_evac = rtsTrue; // mutable
1503 break;
1504 }
1505
1506 case IND:
1507 // IND can happen, for example, when the interpreter allocates
1508 // a gigantic AP closure (more than one block), which ends up
1509 // on the large-object list and then gets updated. See #3424.
1510 case BLACKHOLE:
1511 case IND_STATIC:
1512 evacuate(&((StgInd *)p)->indirectee);
1513
1514 #if 0 && defined(DEBUG)
1515 if (RtsFlags.DebugFlags.gc)
1516 /* Debugging code to print out the size of the thing we just
1517 * promoted
1518 */
1519 {
1520 StgPtr start = gen->scan;
1521 bdescr *start_bd = gen->scan_bd;
1522 StgWord size = 0;
1523 scavenge(&gen);
1524 if (start_bd != gen->scan_bd) {
1525 size += (P_)BLOCK_ROUND_UP(start) - start;
1526 start_bd = start_bd->link;
1527 while (start_bd != gen->scan_bd) {
1528 size += BLOCK_SIZE_W;
1529 start_bd = start_bd->link;
1530 }
1531 size += gen->scan -
1532 (P_)BLOCK_ROUND_DOWN(gen->scan);
1533 } else {
1534 size = gen->scan - start;
1535 }
1536 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1537 }
1538 #endif
1539 break;
1540
1541 default:
1542 barf("scavenge_one: strange object %d", (int)(info->type));
1543 }
1544
1545 no_luck = gct->failed_to_evac;
1546 gct->failed_to_evac = rtsFalse;
1547 return (no_luck);
1548 }
1549
1550 /* -----------------------------------------------------------------------------
1551 Scavenging mutable lists.
1552
1553 We treat the mutable list of each generation > N (i.e. all the
1554 generations older than the one being collected) as roots. We also
1555 remove non-mutable objects from the mutable list at this point.
1556 -------------------------------------------------------------------------- */
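// In outline: each entry on the saved mutable list is scavenged in place, and
// it is re-recorded on the (fresh) mutable list with recordMutableGen_GC()
// only if scavenge_one() reports that it still points into a younger
// generation - except for MUT_ARR_PTRS, which are kept on the list
// unconditionally (see the comment further down).  Everything else simply
// drops off the list, which is how non-mutable objects get removed.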
1557
1558 void
1559 scavenge_mutable_list(bdescr *bd, generation *gen)
1560 {
1561 StgPtr p, q;
1562 nat gen_no;
1563
1564 gen_no = gen->no;
1565 gct->evac_gen_no = gen_no;
1566 for (; bd != NULL; bd = bd->link) {
1567 for (q = bd->start; q < bd->free; q++) {
1568 p = (StgPtr)*q;
1569 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1570
1571 #ifdef DEBUG
1572 switch (get_itbl((StgClosure *)p)->type) {
1573 case MUT_VAR_CLEAN:
1574 // can happen due to concurrent writeMutVars
1575 case MUT_VAR_DIRTY:
1576 mutlist_MUTVARS++; break;
1577 case MUT_ARR_PTRS_CLEAN:
1578 case MUT_ARR_PTRS_DIRTY:
1579 case MUT_ARR_PTRS_FROZEN:
1580 case MUT_ARR_PTRS_FROZEN0:
1581 mutlist_MUTARRS++; break;
1582 case MVAR_CLEAN:
1583 barf("MVAR_CLEAN on mutable list");
1584 case MVAR_DIRTY:
1585 mutlist_MVARS++; break;
1586 case TVAR:
1587 mutlist_TVAR++; break;
1588 case TREC_CHUNK:
1589 mutlist_TREC_CHUNK++; break;
1590 case MUT_PRIM:
1591 if (((StgClosure*)p)->header.info == &stg_TVAR_WATCH_QUEUE_info)
1592 mutlist_TVAR_WATCH_QUEUE++;
1593 else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
1594 mutlist_TREC_HEADER++;
1595 else if (((StgClosure*)p)->header.info == &stg_ATOMIC_INVARIANT_info)
1596 mutlist_ATOMIC_INVARIANT++;
1597 else if (((StgClosure*)p)->header.info == &stg_INVARIANT_CHECK_QUEUE_info)
1598 mutlist_INVARIANT_CHECK_QUEUE++;
1599 else
1600 mutlist_OTHERS++;
1601 break;
1602 default:
1603 mutlist_OTHERS++; break;
1604 }
1605 #endif
1606
1607 // Check whether this object is "clean", that is, it
1608 // definitely doesn't point into a young generation.
1609 // Clean objects don't need to be scavenged. Some clean
1610 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1611 // list at all; others, such as MUT_ARR_PTRS,
1612 // are always on the mutable list.
1613 //
1614 switch (get_itbl((StgClosure *)p)->type) {
1615 case MUT_ARR_PTRS_CLEAN:
1616 recordMutableGen_GC((StgClosure *)p,gen_no);
1617 continue;
1618 case MUT_ARR_PTRS_DIRTY:
1619 {
1620 rtsBool saved_eager_promotion;
1621 saved_eager_promotion = gct->eager_promotion;
1622 gct->eager_promotion = rtsFalse;
1623
1624 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1625
1626 if (gct->failed_to_evac) {
1627 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1628 } else {
1629 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1630 }
1631
1632 gct->eager_promotion = saved_eager_promotion;
1633 gct->failed_to_evac = rtsFalse;
1634 recordMutableGen_GC((StgClosure *)p,gen_no);
1635 continue;
1636 }
1637 default:
1638 ;
1639 }
1640
1641 if (scavenge_one(p)) {
1642 // didn't manage to promote everything, so put the
1643 // object back on the list.
1644 recordMutableGen_GC((StgClosure *)p,gen_no);
1645 }
1646 }
1647 }
1648 }
1649
1650 void
1651 scavenge_capability_mut_lists (Capability *cap)
1652 {
1653 nat g;
1654
1655 /* Mutable lists from each generation > N
1656 * we want to *scavenge* these roots, not evacuate them: they're not
1657 * going to move in this GC.
1658 * Also do them in reverse generation order, for the usual reason:
1659 * namely to reduce the likelihood of spurious old->new pointers.
1660 */
1661 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1662 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1663 freeChain_sync(cap->saved_mut_lists[g]);
1664 cap->saved_mut_lists[g] = NULL;
1665 }
1666 }
1667
1668 /* -----------------------------------------------------------------------------
1669 Scavenging the static objects.
1670
1671 Static objects reachable from the roots are kept on a linked list
1672 (gct->static_objects); we scavenge each one, evacuating everything it
1673 points to, and then move it onto gct->scavenged_static_objects.
1674 -------------------------------------------------------------------------- */
1675
1676 static void
1677 scavenge_static(void)
1678 {
1679 StgClosure *flagged_p, *p;
1680 const StgInfoTable *info;
1681
1682 debugTrace(DEBUG_gc, "scavenging static objects");
1683
1684 /* Always evacuate straight to the oldest generation for static
1685 * objects */
1686 gct->evac_gen_no = oldest_gen->no;
1687
1688 /* keep going until we've scavenged all the objects on the linked
1689 list... */
1690
1691 while (1) {
1692
1693 /* get the next static object from the list. Remember, there might
1694 * be more stuff on this list after each evacuation...
1695 * (more objects may be pushed onto gct->static_objects as we go)
1696 */
1697 flagged_p = gct->static_objects;
1698 if (flagged_p == END_OF_STATIC_OBJECT_LIST) {
1699 break;
1700 }
1701 p = UNTAG_STATIC_LIST_PTR(flagged_p);
1702
1703 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1704 info = get_itbl(p);
1705 /*
1706 if (info->type==RBH)
1707 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1708 */
1709 // make sure the info pointer is into text space
1710
1711 /* Take this object *off* the static_objects list,
1712 * and put it on the scavenged_static_objects list.
1713 */
1714 gct->static_objects = *STATIC_LINK(info,p);
1715 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1716 gct->scavenged_static_objects = flagged_p;
1717
1718 switch (info -> type) {
1719
1720 case IND_STATIC:
1721 {
1722 StgInd *ind = (StgInd *)p;
1723 evacuate(&ind->indirectee);
1724
1725 /* might fail to evacuate it, in which case we have to pop it
1726 * back on the mutable list of the oldest generation. We
1727 * leave it *on* the scavenged_static_objects list, though,
1728 * in case we visit this object again.
1729 */
1730 if (gct->failed_to_evac) {
1731 gct->failed_to_evac = rtsFalse;
1732 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1733 }
1734 break;
1735 }
1736
1737 case THUNK_STATIC:
1738 scavenge_thunk_srt(info);
1739 break;
1740
1741 case FUN_STATIC:
1742 scavenge_fun_srt(info);
1743 break;
1744
1745 case CONSTR_STATIC:
1746 {
1747 StgPtr q, next;
1748
1749 next = (P_)p->payload + info->layout.payload.ptrs;
1750 // evacuate the pointers
1751 for (q = (P_)p->payload; q < next; q++) {
1752 evacuate((StgClosure **)q);
1753 }
1754 break;
1755 }
1756
1757 default:
1758 barf("scavenge_static: strange closure %d", (int)(info->type));
1759 }
1760
1761 ASSERT(gct->failed_to_evac == rtsFalse);
1762 }
1763 }
1764
1765 /* -----------------------------------------------------------------------------
1766 scavenge a chunk of memory described by a bitmap
1767 -------------------------------------------------------------------------- */
1768
1769 static void
1770 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
1771 {
1772 nat i, j, b;
1773 StgWord bitmap;
1774
1775 b = 0;
1776
1777 for (i = 0; i < size; b++) {
1778 bitmap = large_bitmap->bitmap[b];
1779 j = stg_min(size-i, BITS_IN(W_));
1780 i += j;
1781 for (; j > 0; j--, p++) {
1782 if ((bitmap & 1) == 0) {
1783 evacuate((StgClosure **)p);
1784 }
1785 bitmap = bitmap >> 1;
1786 }
1787 }
1788 }
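// Layout note: the large bitmap is an array of whole StgWords, least
// significant bit first, with the same sense as the small bitmaps above (a
// clear bit marks a pointer).  For example, on a 64-bit machine a frame with
// size == 70 uses two bitmap words: bitmap[0] covers slots 0..63 and the low
// 6 bits of bitmap[1] cover slots 64..69.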
1789
1790
1791 /* -----------------------------------------------------------------------------
1792 scavenge_stack walks over a section of stack and evacuates all the
1793 objects pointed to by it. We can use the same code for walking
1794 AP_STACK_UPDs, since these are just sections of copied stack.
1795 -------------------------------------------------------------------------- */
1796
1797 static void
1798 scavenge_stack(StgPtr p, StgPtr stack_end)
1799 {
1800 const StgRetInfoTable* info;
1801 StgWord bitmap;
1802 StgWord size;
1803
1804 /*
1805 * Each time around this loop, we are looking at a chunk of stack
1806 * that starts with an activation record.
1807 */
1808
1809 while (p < stack_end) {
1810 info = get_ret_itbl((StgClosure *)p);
1811
1812 switch (info->i.type) {
1813
1814 case UPDATE_FRAME:
1815 // Note [upd-black-hole]
1816 // In SMP, we can get update frames that point to indirections
1817 // when two threads evaluate the same thunk. We do attempt to
1818 // discover this situation in threadPaused(), but it's
1819 // possible that the following sequence occurs:
1820 //
1821 // A B
1822 // enter T
1823 // enter T
1824 // blackhole T
1825 // update T
1826 // GC
1827 //
1828 // Now T is an indirection, and the update frame is already
1829 // marked on A's stack, so we won't traverse it again in
1830 // threadPaused(). We could traverse the whole stack again
1831 // before GC, but that seems like overkill.
1832 //
1833 // Scavenging this update frame as normal would be disastrous;
1834 // the updatee would end up pointing to the value. So we
1835 // check whether the value after evacuation is a BLACKHOLE,
1836 // and if not, we change the update frame to an stg_enter
1837 // frame that simply returns the value. Hence, blackholing is
1838 // compulsory (otherwise we would have to check for thunks
1839 // too).
1840 //
1841 // One slight hiccup is that the THUNK_SELECTOR machinery can
1842 // overwrite the updatee with an IND. In parallel GC, this
1843 // could even be happening concurrently, so we can't check for
1844 // the IND. Fortunately if we assume that blackholing is
1845 // happening (either lazy or eager), then we can be sure that
1846 // the updatee is never a THUNK_SELECTOR and we're ok.
1847 // NB. this is a new invariant: blackholing is not optional.
1848 {
1849 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1850 StgClosure *v;
1851
1852 evacuate(&frame->updatee);
1853 v = frame->updatee;
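// If the evacuated updatee carries a non-zero pointer tag it is already an
// evaluated value (only constructors and functions are tagged), so it cannot
// be a BLACKHOLE; otherwise we have to look at its info table.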
1854 if (GET_CLOSURE_TAG(v) != 0 ||
1855 (get_itbl(v)->type != BLACKHOLE)) {
1856 // blackholing is compulsory, see above.
1857 frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
1858 }
1859 ASSERT(v->header.info != &stg_TSO_info);
1860 p += sizeofW(StgUpdateFrame);
1861 continue;
1862 }
1863
1864 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1865 case CATCH_STM_FRAME:
1866 case CATCH_RETRY_FRAME:
1867 case ATOMICALLY_FRAME:
1868 case UNDERFLOW_FRAME:
1869 case STOP_FRAME:
1870 case CATCH_FRAME:
1871 case RET_SMALL:
1872 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1873 size = BITMAP_SIZE(info->i.layout.bitmap);
1874 // NOTE: the payload starts immediately after the info-ptr; we
1875 // don't have an StgHeader in the same sense as a heap closure.
1876 p++;
1877 p = scavenge_small_bitmap(p, size, bitmap);
1878
1879 follow_srt:
1880 if (major_gc)
1881 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
1882 continue;
1883
1884 case RET_BCO: {
1885 StgBCO *bco;
1886 StgWord size;
1887
1888 p++;
1889 evacuate((StgClosure **)p);
1890 bco = (StgBCO *)*p;
1891 p++;
1892 size = BCO_BITMAP_SIZE(bco);
1893 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1894 p += size;
1895 continue;
1896 }
1897
1898 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1899 case RET_BIG:
1900 {
1901 StgWord size;
1902
1903 size = GET_LARGE_BITMAP(&info->i)->size;
1904 p++;
1905 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1906 p += size;
1907 // and don't forget to follow the SRT
1908 goto follow_srt;
1909 }
1910
1911 case RET_FUN:
1912 {
1913 StgRetFun *ret_fun = (StgRetFun *)p;
1914 StgFunInfoTable *fun_info;
1915
1916 evacuate(&ret_fun->fun);
1917 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1918 p = scavenge_arg_block(fun_info, ret_fun->payload);
1919 goto follow_srt;
1920 }
1921
1922 default:
1923 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1924 }
1925 }
1926 }
1927
1928 /*-----------------------------------------------------------------------------
1929 scavenge the large object list.
1930
1931 evac_gen_no is set per workspace; similar games are played with it as
1932 in scavenge_block() - see the comment at the top of scavenge_block().
1933 Most large objects are (repeatedly) mutable, so most of the time
1934 evac_gen_no will be zero.
1935 --------------------------------------------------------------------------- */
1936
1937 static void
1938 scavenge_large (gen_workspace *ws)
1939 {
1940 bdescr *bd;
1941 StgPtr p;
1942
1943 gct->evac_gen_no = ws->gen->no;
1944
1945 bd = ws->todo_large_objects;
1946
1947 for (; bd != NULL; bd = ws->todo_large_objects) {
1948
1949 // take this object *off* the large objects list and put it on
1950 // the scavenged large objects list. This is so that we can
1951 // treat new_large_objects as a stack and push new objects on
1952 // the front when evacuating.
1953 ws->todo_large_objects = bd->link;
1954
1955 ACQUIRE_SPIN_LOCK(&ws->gen->sync);
1956 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
1957 ws->gen->n_scavenged_large_blocks += bd->blocks;
1958 RELEASE_SPIN_LOCK(&ws->gen->sync);
1959
1960 p = bd->start;
1961 if (scavenge_one(p)) {
1962 if (ws->gen->no > 0) {
1963 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
1964 }
1965 }
1966
1967 // stats
1968 gct->scanned += closure_sizeW((StgClosure*)p);
1969 }
1970 }
1971
1972 /* ----------------------------------------------------------------------------
1973 Look for work to do.
1974
1975 We look for the oldest gen that has either a todo block that can
1976 be scanned, or a block of work on the global queue that we can
1977 scan.
1978
1979 It is important to take work from the *oldest* generation that
1980 has work available, because that minimizes the likelihood of
1981 evacuating objects into a young generation when they should have
1982 been eagerly promoted. This really does make a difference (the
1983 cacheprof benchmark is one that is affected).
1984
1985 We also want to scan the todo block if possible before grabbing
1986 work from the global queue, the reason being that we don't want to
1987 steal work from the global queue and starve other threads if there
1988 is other work we can usefully be doing.
1989 ------------------------------------------------------------------------- */
1990
1991 static rtsBool
1992 scavenge_find_work (void)
1993 {
1994 int g;
1995 gen_workspace *ws;
1996 rtsBool did_something, did_anything;
1997 bdescr *bd;
1998
1999 gct->scav_find_work++;
2000
2001 did_anything = rtsFalse;
2002
2003 loop:
2004 did_something = rtsFalse;
2005 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2006 ws = &gct->gens[g];
2007
2008 gct->scan_bd = NULL;
2009
2010 // If we have a scan block with some work to do,
2011 // scavenge everything up to the free pointer.
2012 if (ws->todo_bd->u.scan < ws->todo_free)
2013 {
2014 scavenge_block(ws->todo_bd);
2015 did_something = rtsTrue;
2016 break;
2017 }
2018
2019 // If we have any large objects to scavenge, do them now.
2020 if (ws->todo_large_objects) {
2021 scavenge_large(ws);
2022 did_something = rtsTrue;
2023 break;
2024 }
2025
2026 if ((bd = grab_local_todo_block(ws)) != NULL) {
2027 scavenge_block(bd);
2028 did_something = rtsTrue;
2029 break;
2030 }
2031 }
2032
2033 if (did_something) {
2034 did_anything = rtsTrue;
2035 goto loop;
2036 }
2037
2038 #if defined(THREADED_RTS)
2039 if (work_stealing) {
2040 // look for work to steal
2041 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2042 if ((bd = steal_todo_block(g)) != NULL) {
2043 scavenge_block(bd);
2044 did_something = rtsTrue;
2045 break;
2046 }
2047 }
2048
2049 if (did_something) {
2050 did_anything = rtsTrue;
2051 goto loop;
2052 }
2053 }
2054 #endif
2055
2056 // only return when there is no more work to do
2057
2058 return did_anything;
2059 }
2060
2061 /* ----------------------------------------------------------------------------
2062 Scavenge until we can't find anything more to scavenge.
2063 ------------------------------------------------------------------------- */
2064
2065 void
2066 scavenge_loop(void)
2067 {
2068 rtsBool work_to_do;
2069
2070 loop:
2071 work_to_do = rtsFalse;
2072
2073 // scavenge static objects
2074 if (major_gc && gct->static_objects != END_OF_STATIC_OBJECT_LIST) {
2075 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
2076 scavenge_static();
2077 }
2078
2079 // scavenge objects in compacted generation
2080 if (mark_stack_bd != NULL && !mark_stack_empty()) {
2081 scavenge_mark_stack();
2082 work_to_do = rtsTrue;
2083 }
2084
2085 // Order is important here: we want to deal in full blocks as
2086 // much as possible, so go for global work in preference to
2087 // local work. Only if all the global work has been exhausted
2088 // do we start scavenging the fragments of blocks in the local
2089 // workspaces.
2090 if (scavenge_find_work()) goto loop;
2091
2092 if (work_to_do) goto loop;
2093 }