1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: scavenging functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * http://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "Storage.h"
18 #include "GC.h"
19 #include "GCThread.h"
20 #include "GCUtils.h"
21 #include "Compact.h"
22 #include "MarkStack.h"
23 #include "Evac.h"
24 #include "Scav.h"
25 #include "Apply.h"
26 #include "Trace.h"
27 #include "Sanity.h"
28 #include "Capability.h"
29 #include "LdvProfile.h"
30 #include "Hash.h"
31
32 #include "sm/MarkWeak.h"
33
34 static void scavenge_stack (StgPtr p, StgPtr stack_end);
35
36 static void scavenge_large_bitmap (StgPtr p,
37 StgLargeBitmap *large_bitmap,
38 StgWord size );
39
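// NB. When the RTS is threaded but the GC runs with only one thread
// (THREADED_RTS without PARALLEL_GC), these aliases redirect calls to
// the sequential "*1" specialisations of the routines below, which can
// skip the synchronisation required by the parallel variants.
// (Assumption: the "*1" versions are built by compiling this file a
// second time without PARALLEL_GC; see Evac.c for the analogous macros.)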
40 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
41 # define evacuate(a) evacuate1(a)
42 # define evacuate_BLACKHOLE(a) evacuate_BLACKHOLE1(a)
43 # define scavenge_loop(a) scavenge_loop1(a)
44 # define scavenge_block(a) scavenge_block1(a)
45 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
46 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
47 #endif
48
49 /* -----------------------------------------------------------------------------
50 Scavenge a TSO.
51 -------------------------------------------------------------------------- */
52
53 static void
54 scavengeTSO (StgTSO *tso)
55 {
56 bool saved_eager;
57
58 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
59
60 // update the pointer from the InCall.
61 if (tso->bound != NULL) {
62 // NB. We can't just set tso->bound->tso = tso, because this
63 // might be an invalid copy of the TSO resulting from multiple
64 // threads evacuating the TSO simultaneously (see
65 // Evac.c:copy_tag()). Calling evacuate() on this pointer
66 // will ensure that we update it to point to the correct copy.
67 evacuate((StgClosure **)&tso->bound->tso);
68 }
69
70 saved_eager = gct->eager_promotion;
71 gct->eager_promotion = false;
72
73 evacuate((StgClosure **)&tso->blocked_exceptions);
74 evacuate((StgClosure **)&tso->bq);
75
76 // scavenge the current transaction record
77 evacuate((StgClosure **)&tso->trec);
78
79 evacuate((StgClosure **)&tso->stackobj);
80
81 evacuate((StgClosure **)&tso->_link);
82 if ( tso->why_blocked == BlockedOnMVar
83 || tso->why_blocked == BlockedOnMVarRead
84 || tso->why_blocked == BlockedOnBlackHole
85 || tso->why_blocked == BlockedOnMsgThrowTo
86 || tso->why_blocked == NotBlocked
87 ) {
88 evacuate(&tso->block_info.closure);
89 }
90 #if defined(THREADED_RTS)
91 // in the THREADED_RTS, block_info.closure must always point to a
92 // valid closure, because we assume this in throwTo(). In the
93 // non-threaded RTS it might be an FD (for
94 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
95 else {
96 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
97 }
98 #endif
99
100 tso->dirty = gct->failed_to_evac;
101
102 gct->eager_promotion = saved_eager;
103 }
104
105 /* ----------------------------------------------------------------------------
106 Scavenging compact objects
107 ------------------------------------------------------------------------- */
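// A compact's hash table (if present) is keyed on closure addresses.
// Evacuation can move those closures, so the table is rebuilt: each key
// is evacuated and re-inserted under its new address into a fresh
// table (see evacuate_hash_entry below).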
108
109 typedef struct {
110 // We must save gct when calling mapHashTable(), which is compiled
111 // without GCThread.h and so uses a different calling convention.
112 // See also GC.c:mark_root where we do a similar thing.
113 gc_thread *saved_gct;
114 HashTable *newHash;
115 } MapHashData;
116
117 static void
118 evacuate_hash_entry(MapHashData *dat, StgWord key, const void *value)
119 {
120 StgClosure *p = (StgClosure*)key;
121 #if defined(THREADED_RTS)
122 gc_thread *old_gct = gct;
123 #endif
124
125 SET_GCT(dat->saved_gct);
126 evacuate(&p);
127 insertHashTable(dat->newHash, (StgWord)p, value);
128 SET_GCT(old_gct);
129 }
130
131 static void
132 scavenge_compact(StgCompactNFData *str)
133 {
134 bool saved_eager;
135 saved_eager = gct->eager_promotion;
136 gct->eager_promotion = false;
137
138 if (str->hash) {
139 MapHashData dat;
140 dat.saved_gct = gct;
141 HashTable *newHash = allocHashTable();
142 dat.newHash = newHash;
143 mapHashTable(str->hash, (void*)&dat, (MapHashFn)evacuate_hash_entry);
144 freeHashTable(str->hash, NULL);
145 str->hash = newHash;
146 }
147
148 debugTrace(DEBUG_compact,
149 "compact alive @%p, gen %d, %" FMT_Word " bytes",
150 str, Bdescr((P_)str)->gen_no, str->totalW * sizeof(W_));
151
152 gct->eager_promotion = saved_eager;
153 if (gct->failed_to_evac) {
154 ((StgClosure *)str)->header.info = &stg_COMPACT_NFDATA_DIRTY_info;
155 } else {
156 ((StgClosure *)str)->header.info = &stg_COMPACT_NFDATA_CLEAN_info;
157 }
158 }
159
160 /* -----------------------------------------------------------------------------
161 Mutable arrays of pointers
162 -------------------------------------------------------------------------- */
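// Large MUT_ARR_PTRS objects carry a card table after the pointer
// payload: one byte per group of 2^MUT_ARR_PTRS_CARD_BITS elements.  A
// non-zero card records that the corresponding chunk may point into a
// younger generation and must be re-scanned during minor GCs; a zero
// card can be skipped (see scavenge_mut_arr_ptrs_marked below).  E.g.
// if MUT_ARR_PTRS_CARD_BITS is 7, one card covers 128 elements.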
163
164 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
165 {
166 W_ m;
167 bool any_failed;
168 StgPtr p, q;
169
170 any_failed = false;
171 p = (StgPtr)&a->payload[0];
172 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
173 {
174 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
175 for (; p < q; p++) {
176 evacuate((StgClosure**)p);
177 }
178 if (gct->failed_to_evac) {
179 any_failed = true;
180 *mutArrPtrsCard(a,m) = 1;
181 gct->failed_to_evac = false;
182 } else {
183 *mutArrPtrsCard(a,m) = 0;
184 }
185 }
186
187 q = (StgPtr)&a->payload[a->ptrs];
188 if (p < q) {
189 for (; p < q; p++) {
190 evacuate((StgClosure**)p);
191 }
192 if (gct->failed_to_evac) {
193 any_failed = true;
194 *mutArrPtrsCard(a,m) = 1;
195 gct->failed_to_evac = false;
196 } else {
197 *mutArrPtrsCard(a,m) = 0;
198 }
199 }
200
201 gct->failed_to_evac = any_failed;
202 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
203 }
204
205 // scavenge only the marked areas of a MUT_ARR_PTRS
206 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
207 {
208 W_ m;
209 StgPtr p, q;
210 bool any_failed;
211
212 any_failed = false;
213 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
214 {
215 if (*mutArrPtrsCard(a,m) != 0) {
216 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
217 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
218 (StgPtr)&a->payload[a->ptrs]);
219 for (; p < q; p++) {
220 evacuate((StgClosure**)p);
221 }
222 if (gct->failed_to_evac) {
223 any_failed = true;
224 gct->failed_to_evac = false;
225 } else {
226 *mutArrPtrsCard(a,m) = 0;
227 }
228 }
229 }
230
231 gct->failed_to_evac = any_failed;
232 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
233 }
234
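// Scavenge `size' payload words described by a single-word bitmap.
// Worked example (illustrative): size == 4, bitmap == 0b0110 -- words 0
// and 3 have a clear bit and are evacuated as pointers, while words 1
// and 2 have a set bit and are skipped as non-pointers.  Clear bit ==
// pointer is the convention for every bitmap walker in this file.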
235 STATIC_INLINE StgPtr
236 scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
237 {
238 while (size > 0) {
239 if ((bitmap & 1) == 0) {
240 evacuate((StgClosure **)p);
241 }
242 p++;
243 bitmap = bitmap >> 1;
244 size--;
245 }
246 return p;
247 }
248
249 /* -----------------------------------------------------------------------------
250 Blocks of function args occur on the stack (at the top) and
251 in PAPs.
252 -------------------------------------------------------------------------- */
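// How an argument block is described depends on the function's
// fun_type:
//   ARG_GEN      - a small bitmap packed into the fun info table
//   ARG_GEN_BIG  - a separately stored StgLargeBitmap
//   default      - one of the standard patterns in stg_arg_bitmaps[]
// In each case a clear bitmap bit marks an argument word that holds a
// pointer and must be evacuated.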
253
254 STATIC_INLINE StgPtr
255 scavenge_arg_block (const StgFunInfoTable *fun_info, StgClosure **args)
256 {
257 StgPtr p;
258 StgWord bitmap;
259 StgWord size;
260
261 p = (StgPtr)args;
262 switch (fun_info->f.fun_type) {
263 case ARG_GEN:
264 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
265 size = BITMAP_SIZE(fun_info->f.b.bitmap);
266 goto small_bitmap;
267 case ARG_GEN_BIG:
268 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
269 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
270 p += size;
271 break;
272 default:
273 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
274 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
275 small_bitmap:
276 p = scavenge_small_bitmap(p, size, bitmap);
277 break;
278 }
279 return p;
280 }
281
282 STATIC_INLINE GNUC_ATTR_HOT StgPtr
283 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
284 {
285 StgPtr p;
286 StgWord bitmap;
287 const StgFunInfoTable *fun_info;
288
289 fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(fun));
290 ASSERT(fun_info->i.type != PAP);
291 p = (StgPtr)payload;
292
293 switch (fun_info->f.fun_type) {
294 case ARG_GEN:
295 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
296 goto small_bitmap;
297 case ARG_GEN_BIG:
298 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
299 p += size;
300 break;
301 case ARG_BCO:
302 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
303 p += size;
304 break;
305 default:
306 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
307 small_bitmap:
308 p = scavenge_small_bitmap(p, size, bitmap);
309 break;
310 }
311 return p;
312 }
313
314 STATIC_INLINE GNUC_ATTR_HOT StgPtr
315 scavenge_PAP (StgPAP *pap)
316 {
317 evacuate(&pap->fun);
318 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
319 }
320
321 STATIC_INLINE StgPtr
322 scavenge_AP (StgAP *ap)
323 {
324 evacuate(&ap->fun);
325 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
326 }
327
328 /* -----------------------------------------------------------------------------
329 Scavenge SRTs
330 -------------------------------------------------------------------------- */
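// An SRT (Static Reference Table) lists the static closures, such as
// CAFs, that a function's or thunk's code can reach.  Static closures
// never live in a young generation, so SRTs only need to be followed
// during a major GC -- hence the early return on !major_gc below.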
331
332 STATIC_INLINE GNUC_ATTR_HOT void
333 scavenge_thunk_srt(const StgInfoTable *info)
334 {
335 StgThunkInfoTable *thunk_info;
336
337 if (!major_gc) return;
338
339 thunk_info = itbl_to_thunk_itbl(info);
340 if (thunk_info->i.srt) {
341 StgClosure *srt = (StgClosure*)GET_SRT(thunk_info);
342 evacuate(&srt);
343 }
344 }
345
346 STATIC_INLINE GNUC_ATTR_HOT void
347 scavenge_fun_srt(const StgInfoTable *info)
348 {
349 StgFunInfoTable *fun_info;
350
351 if (!major_gc) return;
352
353 fun_info = itbl_to_fun_itbl(info);
354 if (fun_info->i.srt) {
355 StgClosure *srt = (StgClosure*)GET_FUN_SRT(fun_info);
356 evacuate(&srt);
357 }
358 }
359
360 /* -----------------------------------------------------------------------------
361 Scavenge a block from the given scan pointer up to bd->free.
362
363 evac_gen_no is set to the number of the generation to which the
364 block being scavenged belongs (bd->gen_no).
365
366 We sometimes temporarily turn off eager promotion
367 (gct->eager_promotion) while scavenging a mutable object, where
368 eager promotion isn't such a good idea; such objects end up on the
369 mutable list instead.
370 -------------------------------------------------------------------------- */
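// This is a Cheney-style scan: the words between bd->u.scan and
// bd->free hold objects that have already been evacuated (copied) but
// whose fields have not yet been followed.  Scavenging an object may
// evacuate further objects into the same block, advancing bd->free, so
// the loop re-reads the free pointer on every iteration.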
371
372 static GNUC_ATTR_HOT void
373 scavenge_block (bdescr *bd)
374 {
375 StgPtr p, q;
376 const StgInfoTable *info;
377 bool saved_eager_promotion;
378 gen_workspace *ws;
379
380 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
381 bd->start, bd->gen_no, bd->u.scan);
382
383 gct->scan_bd = bd;
384 gct->evac_gen_no = bd->gen_no;
385 saved_eager_promotion = gct->eager_promotion;
386 gct->failed_to_evac = false;
387
388 ws = &gct->gens[bd->gen->no];
389
390 p = bd->u.scan;
391
392 // we might be evacuating into the very object that we're
393 // scavenging, so we have to check the real bd->free pointer each
394 // time around the loop.
395 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
396
397 ASSERT(bd->link == NULL);
398 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
399 info = get_itbl((StgClosure *)p);
400
401 ASSERT(gct->thunk_selector_depth == 0);
402
403 q = p;
404 switch (info->type) {
405
406 case MVAR_CLEAN:
407 case MVAR_DIRTY:
408 {
409 StgMVar *mvar = ((StgMVar *)p);
410 gct->eager_promotion = false;
411 evacuate((StgClosure **)&mvar->head);
412 evacuate((StgClosure **)&mvar->tail);
413 evacuate((StgClosure **)&mvar->value);
414 gct->eager_promotion = saved_eager_promotion;
415
416 if (gct->failed_to_evac) {
417 mvar->header.info = &stg_MVAR_DIRTY_info;
418 } else {
419 mvar->header.info = &stg_MVAR_CLEAN_info;
420 }
421 p += sizeofW(StgMVar);
422 break;
423 }
424
425 case TVAR:
426 {
427 StgTVar *tvar = ((StgTVar *)p);
428 gct->eager_promotion = false;
429 evacuate((StgClosure **)&tvar->current_value);
430 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
431 gct->eager_promotion = saved_eager_promotion;
432
433 if (gct->failed_to_evac) {
434 tvar->header.info = &stg_TVAR_DIRTY_info;
435 } else {
436 tvar->header.info = &stg_TVAR_CLEAN_info;
437 }
438 p += sizeofW(StgTVar);
439 break;
440 }
441
442 case FUN_2_0:
443 scavenge_fun_srt(info);
444 evacuate(&((StgClosure *)p)->payload[1]);
445 evacuate(&((StgClosure *)p)->payload[0]);
446 p += sizeofW(StgHeader) + 2;
447 break;
448
449 case THUNK_2_0:
450 scavenge_thunk_srt(info);
451 evacuate(&((StgThunk *)p)->payload[1]);
452 evacuate(&((StgThunk *)p)->payload[0]);
453 p += sizeofW(StgThunk) + 2;
454 break;
455
456 case CONSTR_2_0:
457 evacuate(&((StgClosure *)p)->payload[1]);
458 evacuate(&((StgClosure *)p)->payload[0]);
459 p += sizeofW(StgHeader) + 2;
460 break;
461
462 case THUNK_1_0:
463 scavenge_thunk_srt(info);
464 evacuate(&((StgThunk *)p)->payload[0]);
465 p += sizeofW(StgThunk) + 1;
466 break;
467
468 case FUN_1_0:
469 scavenge_fun_srt(info);
470 /* fallthrough */
471 case CONSTR_1_0:
472 evacuate(&((StgClosure *)p)->payload[0]);
473 p += sizeofW(StgHeader) + 1;
474 break;
475
476 case THUNK_0_1:
477 scavenge_thunk_srt(info);
478 p += sizeofW(StgThunk) + 1;
479 break;
480
481 case FUN_0_1:
482 scavenge_fun_srt(info);
483 /* fallthrough */
484 case CONSTR_0_1:
485 p += sizeofW(StgHeader) + 1;
486 break;
487
488 case THUNK_0_2:
489 scavenge_thunk_srt(info);
490 p += sizeofW(StgThunk) + 2;
491 break;
492
493 case FUN_0_2:
494 scavenge_fun_srt(info);
495 /* fallthrough */
496 case CONSTR_0_2:
497 p += sizeofW(StgHeader) + 2;
498 break;
499
500 case THUNK_1_1:
501 scavenge_thunk_srt(info);
502 evacuate(&((StgThunk *)p)->payload[0]);
503 p += sizeofW(StgThunk) + 2;
504 break;
505
506 case FUN_1_1:
507 scavenge_fun_srt(info);
508 /* fallthrough */
509 case CONSTR_1_1:
510 evacuate(&((StgClosure *)p)->payload[0]);
511 p += sizeofW(StgHeader) + 2;
512 break;
513
514 case FUN:
515 scavenge_fun_srt(info);
516 goto gen_obj;
517
518 case THUNK:
519 {
520 StgPtr end;
521
522 scavenge_thunk_srt(info);
523 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
524 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
525 evacuate((StgClosure **)p);
526 }
527 p += info->layout.payload.nptrs;
528 break;
529 }
530
531 gen_obj:
532 case CONSTR:
533 case CONSTR_NOCAF:
534 case WEAK:
535 case PRIM:
536 {
537 StgPtr end;
538
539 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
540 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
541 evacuate((StgClosure **)p);
542 }
543 p += info->layout.payload.nptrs;
544 break;
545 }
546
547 case BCO: {
548 StgBCO *bco = (StgBCO *)p;
549 evacuate((StgClosure **)&bco->instrs);
550 evacuate((StgClosure **)&bco->literals);
551 evacuate((StgClosure **)&bco->ptrs);
552 p += bco_sizeW(bco);
553 break;
554 }
555
556 case BLACKHOLE:
557 evacuate(&((StgInd *)p)->indirectee);
558 p += sizeofW(StgInd);
559 break;
560
561 case MUT_VAR_CLEAN:
562 case MUT_VAR_DIRTY:
563 gct->eager_promotion = false;
564 evacuate(&((StgMutVar *)p)->var);
565 gct->eager_promotion = saved_eager_promotion;
566
567 if (gct->failed_to_evac) {
568 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
569 } else {
570 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
571 }
572 p += sizeofW(StgMutVar);
573 break;
574
575 case BLOCKING_QUEUE:
576 {
577 StgBlockingQueue *bq = (StgBlockingQueue *)p;
578
579 gct->eager_promotion = false;
580 evacuate(&bq->bh);
581 evacuate((StgClosure**)&bq->owner);
582 evacuate((StgClosure**)&bq->queue);
583 evacuate((StgClosure**)&bq->link);
584 gct->eager_promotion = saved_eager_promotion;
585
586 if (gct->failed_to_evac) {
587 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
588 } else {
589 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
590 }
591 p += sizeofW(StgBlockingQueue);
592 break;
593 }
594
595 case THUNK_SELECTOR:
596 {
597 StgSelector *s = (StgSelector *)p;
598 evacuate(&s->selectee);
599 p += THUNK_SELECTOR_sizeW();
600 break;
601 }
602
603 // A chunk of stack saved in a heap object
604 case AP_STACK:
605 {
606 StgAP_STACK *ap = (StgAP_STACK *)p;
607
608 evacuate(&ap->fun);
609 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
610 p = (StgPtr)ap->payload + ap->size;
611 break;
612 }
613
614 case PAP:
615 p = scavenge_PAP((StgPAP *)p);
616 break;
617
618 case AP:
619 p = scavenge_AP((StgAP *)p);
620 break;
621
622 case ARR_WORDS:
623 // nothing to follow
624 p += arr_words_sizeW((StgArrBytes *)p);
625 break;
626
627 case MUT_ARR_PTRS_CLEAN:
628 case MUT_ARR_PTRS_DIRTY:
629 {
630 // We don't eagerly promote objects pointed to by a mutable
631 // array, but if we find the array only points to objects in
632 // the same or an older generation, we mark it "clean" and
633 // avoid traversing it during minor GCs.
634 gct->eager_promotion = false;
635
636 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
637
638 if (gct->failed_to_evac) {
639 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
640 } else {
641 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
642 }
643
644 gct->eager_promotion = saved_eager_promotion;
645 gct->failed_to_evac = true; // always put it on the mutable list.
646 break;
647 }
648
649 case MUT_ARR_PTRS_FROZEN_CLEAN:
650 case MUT_ARR_PTRS_FROZEN_DIRTY:
651 // follow everything
652 {
653 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
654
655 if (gct->failed_to_evac) {
656 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
657 } else {
658 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
659 }
660 break;
661 }
662
663 case SMALL_MUT_ARR_PTRS_CLEAN:
664 case SMALL_MUT_ARR_PTRS_DIRTY:
665 // follow everything
666 {
667 StgPtr next;
668
669 // We don't eagerly promote objects pointed to by a mutable
670 // array, but if we find the array only points to objects in
671 // the same or an older generation, we mark it "clean" and
672 // avoid traversing it during minor GCs.
673 gct->eager_promotion = false;
674 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
675 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
676 evacuate((StgClosure **)p);
677 }
678 gct->eager_promotion = saved_eager_promotion;
679
680 if (gct->failed_to_evac) {
681 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
682 } else {
683 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
684 }
685
686 gct->failed_to_evac = true; // always put it on the mutable list.
687 break;
688 }
689
690 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
691 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
692 // follow everything
693 {
694 StgPtr next;
695
696 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
697 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
698 evacuate((StgClosure **)p);
699 }
700
701 if (gct->failed_to_evac) {
702 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
703 } else {
704 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
705 }
706 break;
707 }
708
709 case TSO:
710 {
711 scavengeTSO((StgTSO *)p);
712 p += sizeofW(StgTSO);
713 break;
714 }
715
716 case STACK:
717 {
718 StgStack *stack = (StgStack*)p;
719
720 gct->eager_promotion = false;
721
722 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
723 stack->dirty = gct->failed_to_evac;
724 p += stack_sizeW(stack);
725
726 gct->eager_promotion = saved_eager_promotion;
727 break;
728 }
729
730 case MUT_PRIM:
731 {
732 StgPtr end;
733
734 gct->eager_promotion = false;
735
736 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
737 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
738 evacuate((StgClosure **)p);
739 }
740 p += info->layout.payload.nptrs;
741
742 gct->eager_promotion = saved_eager_promotion;
743 gct->failed_to_evac = true; // mutable
744 break;
745 }
746
747 case TREC_CHUNK:
748 {
749 StgWord i;
750 StgTRecChunk *tc = ((StgTRecChunk *) p);
751 TRecEntry *e = &(tc -> entries[0]);
752 gct->eager_promotion = false;
753 evacuate((StgClosure **)&tc->prev_chunk);
754 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
755 evacuate((StgClosure **)&e->tvar);
756 evacuate((StgClosure **)&e->expected_value);
757 evacuate((StgClosure **)&e->new_value);
758 }
759 gct->eager_promotion = saved_eager_promotion;
760 gct->failed_to_evac = true; // mutable
761 p += sizeofW(StgTRecChunk);
762 break;
763 }
764
765 default:
766 barf("scavenge: unimplemented/strange closure type %d @ %p",
767 info->type, p);
768 }
769
770 /*
771 * We need to record the current object on the mutable list if
772 * (a) It is actually mutable, or
773 * (b) It contains pointers to a younger generation.
774 * Case (b) arises if we didn't manage to promote everything that
775 * the current object points to into the current generation.
776 */
777 if (gct->failed_to_evac) {
778 gct->failed_to_evac = false;
779 if (bd->gen_no > 0) {
780 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
781 }
782 }
783 }
784
785 if (p > bd->free) {
786 gct->copied += ws->todo_free - bd->free;
787 bd->free = p;
788 }
789
790 debugTrace(DEBUG_gc, " scavenged %ld bytes",
791 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
792
793 // update stats: this is a block that has been scavenged
794 gct->scanned += bd->free - bd->u.scan;
795 bd->u.scan = bd->free;
796
797 if (bd != ws->todo_bd) {
798 // we're not going to evac any more objects into
799 // this block, so push it now.
800 push_scanned_block(bd, ws);
801 }
802
803 gct->scan_bd = NULL;
804 }
805 /* -----------------------------------------------------------------------------
806 Scavenge everything on the mark stack.
807
808 This is slightly different from scavenge_block():
809 - we don't walk linearly through the objects, so the scavenger
810 doesn't need to advance the pointer on to the next object.
811 -------------------------------------------------------------------------- */
812
813 static void
814 scavenge_mark_stack(void)
815 {
816 StgPtr p, q;
817 const StgInfoTable *info;
818 bool saved_eager_promotion;
819
820 gct->evac_gen_no = oldest_gen->no;
821 saved_eager_promotion = gct->eager_promotion;
822
823 while ((p = pop_mark_stack())) {
824
825 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
826 info = get_itbl((StgClosure *)p);
827
828 q = p;
829 switch (info->type) {
830
831 case MVAR_CLEAN:
832 case MVAR_DIRTY:
833 {
834 StgMVar *mvar = ((StgMVar *)p);
835 gct->eager_promotion = false;
836 evacuate((StgClosure **)&mvar->head);
837 evacuate((StgClosure **)&mvar->tail);
838 evacuate((StgClosure **)&mvar->value);
839 gct->eager_promotion = saved_eager_promotion;
840
841 if (gct->failed_to_evac) {
842 mvar->header.info = &stg_MVAR_DIRTY_info;
843 } else {
844 mvar->header.info = &stg_MVAR_CLEAN_info;
845 }
846 break;
847 }
848
849 case TVAR:
850 {
851 StgTVar *tvar = ((StgTVar *)p);
852 gct->eager_promotion = false;
853 evacuate((StgClosure **)&tvar->current_value);
854 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
855 gct->eager_promotion = saved_eager_promotion;
856
857 if (gct->failed_to_evac) {
858 tvar->header.info = &stg_TVAR_DIRTY_info;
859 } else {
860 tvar->header.info = &stg_TVAR_CLEAN_info;
861 }
862 break;
863 }
864
865 case FUN_2_0:
866 scavenge_fun_srt(info);
867 evacuate(&((StgClosure *)p)->payload[1]);
868 evacuate(&((StgClosure *)p)->payload[0]);
869 break;
870
871 case THUNK_2_0:
872 scavenge_thunk_srt(info);
873 evacuate(&((StgThunk *)p)->payload[1]);
874 evacuate(&((StgThunk *)p)->payload[0]);
875 break;
876
877 case CONSTR_2_0:
878 evacuate(&((StgClosure *)p)->payload[1]);
879 evacuate(&((StgClosure *)p)->payload[0]);
880 break;
881
882 case FUN_1_0:
883 case FUN_1_1:
884 scavenge_fun_srt(info);
885 evacuate(&((StgClosure *)p)->payload[0]);
886 break;
887
888 case THUNK_1_0:
889 case THUNK_1_1:
890 scavenge_thunk_srt(info);
891 evacuate(&((StgThunk *)p)->payload[0]);
892 break;
893
894 case CONSTR_1_0:
895 case CONSTR_1_1:
896 evacuate(&((StgClosure *)p)->payload[0]);
897 break;
898
899 case FUN_0_1:
900 case FUN_0_2:
901 scavenge_fun_srt(info);
902 break;
903
904 case THUNK_0_1:
905 case THUNK_0_2:
906 scavenge_thunk_srt(info);
907 break;
908
909 case CONSTR_0_1:
910 case CONSTR_0_2:
911 break;
912
913 case FUN:
914 scavenge_fun_srt(info);
915 goto gen_obj;
916
917 case THUNK:
918 {
919 StgPtr end;
920
921 scavenge_thunk_srt(info);
922 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
923 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
924 evacuate((StgClosure **)p);
925 }
926 break;
927 }
928
929 gen_obj:
930 case CONSTR:
931 case CONSTR_NOCAF:
932 case WEAK:
933 case PRIM:
934 {
935 StgPtr end;
936
937 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
938 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
939 evacuate((StgClosure **)p);
940 }
941 break;
942 }
943
944 case BCO: {
945 StgBCO *bco = (StgBCO *)p;
946 evacuate((StgClosure **)&bco->instrs);
947 evacuate((StgClosure **)&bco->literals);
948 evacuate((StgClosure **)&bco->ptrs);
949 break;
950 }
951
952 case IND:
953 case BLACKHOLE:
954 evacuate(&((StgInd *)p)->indirectee);
955 break;
956
957 case MUT_VAR_CLEAN:
958 case MUT_VAR_DIRTY: {
959 gct->eager_promotion = false;
960 evacuate(&((StgMutVar *)p)->var);
961 gct->eager_promotion = saved_eager_promotion;
962
963 if (gct->failed_to_evac) {
964 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
965 } else {
966 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
967 }
968 break;
969 }
970
971 case BLOCKING_QUEUE:
972 {
973 StgBlockingQueue *bq = (StgBlockingQueue *)p;
974
975 gct->eager_promotion = false;
976 evacuate(&bq->bh);
977 evacuate((StgClosure**)&bq->owner);
978 evacuate((StgClosure**)&bq->queue);
979 evacuate((StgClosure**)&bq->link);
980 gct->eager_promotion = saved_eager_promotion;
981
982 if (gct->failed_to_evac) {
983 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
984 } else {
985 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
986 }
987 break;
988 }
989
990 case ARR_WORDS:
991 break;
992
993 case THUNK_SELECTOR:
994 {
995 StgSelector *s = (StgSelector *)p;
996 evacuate(&s->selectee);
997 break;
998 }
999
1000 // A chunk of stack saved in a heap object
1001 case AP_STACK:
1002 {
1003 StgAP_STACK *ap = (StgAP_STACK *)p;
1004
1005 evacuate(&ap->fun);
1006 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1007 break;
1008 }
1009
1010 case PAP:
1011 scavenge_PAP((StgPAP *)p);
1012 break;
1013
1014 case AP:
1015 scavenge_AP((StgAP *)p);
1016 break;
1017
1018 case MUT_ARR_PTRS_CLEAN:
1019 case MUT_ARR_PTRS_DIRTY:
1020 // follow everything
1021 {
1022 // We don't eagerly promote objects pointed to by a mutable
1023 // array, but if we find the array only points to objects in
1024 // the same or an older generation, we mark it "clean" and
1025 // avoid traversing it during minor GCs.
1026 gct->eager_promotion = false;
1027
1028 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1029
1030 if (gct->failed_to_evac) {
1031 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1032 } else {
1033 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1034 }
1035
1036 gct->eager_promotion = saved_eager_promotion;
1037 gct->failed_to_evac = true; // mutable anyhow.
1038 break;
1039 }
1040
1041 case MUT_ARR_PTRS_FROZEN_CLEAN:
1042 case MUT_ARR_PTRS_FROZEN_DIRTY:
1043 // follow everything
1044 {
1045 StgPtr q = p;
1046
1047 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1048
1049 if (gct->failed_to_evac) {
1050 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
1051 } else {
1052 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
1053 }
1054 break;
1055 }
1056
1057 case SMALL_MUT_ARR_PTRS_CLEAN:
1058 case SMALL_MUT_ARR_PTRS_DIRTY:
1059 // follow everything
1060 {
1061 StgPtr next;
1062 bool saved_eager;
1063
1064 // We don't eagerly promote objects pointed to by a mutable
1065 // array, but if we find the array only points to objects in
1066 // the same or an older generation, we mark it "clean" and
1067 // avoid traversing it during minor GCs.
1068 saved_eager = gct->eager_promotion;
1069 gct->eager_promotion = false;
1070 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1071 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1072 evacuate((StgClosure **)p);
1073 }
1074 gct->eager_promotion = saved_eager;
1075
1076 if (gct->failed_to_evac) {
1077 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1078 } else {
1079 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1080 }
1081
1082 gct->failed_to_evac = true; // mutable anyhow.
1083 break;
1084 }
1085
1086 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
1087 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
1088 // follow everything
1089 {
1090 StgPtr next, q = p;
1091
1092 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1093 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1094 evacuate((StgClosure **)p);
1095 }
1096
1097 if (gct->failed_to_evac) {
1098 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
1099 } else {
1100 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
1101 }
1102 break;
1103 }
1104
1105 case TSO:
1106 {
1107 scavengeTSO((StgTSO*)p);
1108 break;
1109 }
1110
1111 case STACK:
1112 {
1113 StgStack *stack = (StgStack*)p;
1114
1115 gct->eager_promotion = false;
1116
1117 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1118 stack->dirty = gct->failed_to_evac;
1119
1120 gct->eager_promotion = saved_eager_promotion;
1121 break;
1122 }
1123
1124 case MUT_PRIM:
1125 {
1126 StgPtr end;
1127
1128 gct->eager_promotion = false;
1129
1130 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1131 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1132 evacuate((StgClosure **)p);
1133 }
1134
1135 gct->eager_promotion = saved_eager_promotion;
1136 gct->failed_to_evac = true; // mutable
1137 break;
1138 }
1139
1140 case TREC_CHUNK:
1141 {
1142 StgWord i;
1143 StgTRecChunk *tc = ((StgTRecChunk *) p);
1144 TRecEntry *e = &(tc -> entries[0]);
1145 gct->eager_promotion = false;
1146 evacuate((StgClosure **)&tc->prev_chunk);
1147 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1148 evacuate((StgClosure **)&e->tvar);
1149 evacuate((StgClosure **)&e->expected_value);
1150 evacuate((StgClosure **)&e->new_value);
1151 }
1152 gct->eager_promotion = saved_eager_promotion;
1153 gct->failed_to_evac = true; // mutable
1154 break;
1155 }
1156
1157 default:
1158 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1159 info->type, p);
1160 }
1161
1162 if (gct->failed_to_evac) {
1163 gct->failed_to_evac = false;
1164 if (gct->evac_gen_no) {
1165 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1166 }
1167 }
1168 } // while (p = pop_mark_stack())
1169 }
1170
1171 /* -----------------------------------------------------------------------------
1172 Scavenge one object.
1173
1174 This is used for objects that are temporarily marked as mutable
1175 because they contain old-to-new generation pointers. Only certain
1176 objects can have this property.
1177 -------------------------------------------------------------------------- */
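// Returns true if, after scavenging, the object still points into a
// younger generation (gct->failed_to_evac was set), meaning the caller
// must keep it on a mutable list; returns false if the object is now
// clean and can be dropped from the list.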
1178
1179 static bool
1180 scavenge_one(StgPtr p)
1181 {
1182 const StgInfoTable *info;
1183 bool no_luck;
1184 bool saved_eager_promotion;
1185
1186 saved_eager_promotion = gct->eager_promotion;
1187
1188 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1189 info = get_itbl((StgClosure *)p);
1190
1191 switch (info->type) {
1192
1193 case MVAR_CLEAN:
1194 case MVAR_DIRTY:
1195 {
1196 StgMVar *mvar = ((StgMVar *)p);
1197 gct->eager_promotion = false;
1198 evacuate((StgClosure **)&mvar->head);
1199 evacuate((StgClosure **)&mvar->tail);
1200 evacuate((StgClosure **)&mvar->value);
1201 gct->eager_promotion = saved_eager_promotion;
1202
1203 if (gct->failed_to_evac) {
1204 mvar->header.info = &stg_MVAR_DIRTY_info;
1205 } else {
1206 mvar->header.info = &stg_MVAR_CLEAN_info;
1207 }
1208 break;
1209 }
1210
1211 case TVAR:
1212 {
1213 StgTVar *tvar = ((StgTVar *)p);
1214 gct->eager_promotion = false;
1215 evacuate((StgClosure **)&tvar->current_value);
1216 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1217 gct->eager_promotion = saved_eager_promotion;
1218
1219 if (gct->failed_to_evac) {
1220 tvar->header.info = &stg_TVAR_DIRTY_info;
1221 } else {
1222 tvar->header.info = &stg_TVAR_CLEAN_info;
1223 }
1224 break;
1225 }
1226
1227 case THUNK:
1228 case THUNK_1_0:
1229 case THUNK_0_1:
1230 case THUNK_1_1:
1231 case THUNK_0_2:
1232 case THUNK_2_0:
1233 {
1234 StgPtr q, end;
1235
1236 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1237 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1238 evacuate((StgClosure **)q);
1239 }
1240 break;
1241 }
1242
1243 case FUN:
1244 case FUN_1_0: // hardly worth specialising these guys
1245 case FUN_0_1:
1246 case FUN_1_1:
1247 case FUN_0_2:
1248 case FUN_2_0:
1249 case CONSTR:
1250 case CONSTR_NOCAF:
1251 case CONSTR_1_0:
1252 case CONSTR_0_1:
1253 case CONSTR_1_1:
1254 case CONSTR_0_2:
1255 case CONSTR_2_0:
1256 case PRIM:
1257 {
1258 StgPtr q, end;
1259
1260 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1261 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1262 evacuate((StgClosure **)q);
1263 }
1264 break;
1265 }
1266
1267 case WEAK:
1268 // This WEAK object will not be considered by tidyWeakList during this
1269 // collection because it is in a generation > N, but it is on the
1270 // mutable list so we must evacuate all of its pointers because some
1271 // of them may point into a younger generation.
1272 scavengeLiveWeak((StgWeak *)p);
1273 break;
1274
1275 case MUT_VAR_CLEAN:
1276 case MUT_VAR_DIRTY: {
1277 StgPtr q = p;
1278
1279 gct->eager_promotion = false;
1280 evacuate(&((StgMutVar *)p)->var);
1281 gct->eager_promotion = saved_eager_promotion;
1282
1283 if (gct->failed_to_evac) {
1284 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1285 } else {
1286 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1287 }
1288 break;
1289 }
1290
1291 case BLOCKING_QUEUE:
1292 {
1293 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1294
1295 gct->eager_promotion = false;
1296 evacuate(&bq->bh);
1297 evacuate((StgClosure**)&bq->owner);
1298 evacuate((StgClosure**)&bq->queue);
1299 evacuate((StgClosure**)&bq->link);
1300 gct->eager_promotion = saved_eager_promotion;
1301
1302 if (gct->failed_to_evac) {
1303 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1304 } else {
1305 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1306 }
1307 break;
1308 }
1309
1310 case THUNK_SELECTOR:
1311 {
1312 StgSelector *s = (StgSelector *)p;
1313 evacuate(&s->selectee);
1314 break;
1315 }
1316
1317 case AP_STACK:
1318 {
1319 StgAP_STACK *ap = (StgAP_STACK *)p;
1320
1321 evacuate(&ap->fun);
1322 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1323 p = (StgPtr)ap->payload + ap->size;
1324 break;
1325 }
1326
1327 case PAP:
1328 p = scavenge_PAP((StgPAP *)p);
1329 break;
1330
1331 case AP:
1332 p = scavenge_AP((StgAP *)p);
1333 break;
1334
1335 case ARR_WORDS:
1336 // nothing to follow
1337 break;
1338
1339 case MUT_ARR_PTRS_CLEAN:
1340 case MUT_ARR_PTRS_DIRTY:
1341 {
1342 // We don't eagerly promote objects pointed to by a mutable
1343 // array, but if we find the array only points to objects in
1344 // the same or an older generation, we mark it "clean" and
1345 // avoid traversing it during minor GCs.
1346 gct->eager_promotion = false;
1347
1348 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1349
1350 if (gct->failed_to_evac) {
1351 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1352 } else {
1353 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1354 }
1355
1356 gct->eager_promotion = saved_eager_promotion;
1357 gct->failed_to_evac = true;
1358 break;
1359 }
1360
1361 case MUT_ARR_PTRS_FROZEN_CLEAN:
1362 case MUT_ARR_PTRS_FROZEN_DIRTY:
1363 {
1364 // follow everything
1365 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1366
1367 if (gct->failed_to_evac) {
1368 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
1369 } else {
1370 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
1371 }
1372 break;
1373 }
1374
1375 case SMALL_MUT_ARR_PTRS_CLEAN:
1376 case SMALL_MUT_ARR_PTRS_DIRTY:
1377 {
1378 StgPtr next, q;
1379 bool saved_eager;
1380
1381 // We don't eagerly promote objects pointed to by a mutable
1382 // array, but if we find the array only points to objects in
1383 // the same or an older generation, we mark it "clean" and
1384 // avoid traversing it during minor GCs.
1385 saved_eager = gct->eager_promotion;
1386 gct->eager_promotion = false;
1387 q = p;
1388 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1389 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1390 evacuate((StgClosure **)p);
1391 }
1392 gct->eager_promotion = saved_eager;
1393
1394 if (gct->failed_to_evac) {
1395 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
1396 } else {
1397 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
1398 }
1399
1400 gct->failed_to_evac = true;
1401 break;
1402 }
1403
1404 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
1405 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
1406 {
1407 // follow everything
1408 StgPtr next, q=p;
1409
1410 next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1411 for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1412 evacuate((StgClosure **)p);
1413 }
1414
1415 if (gct->failed_to_evac) {
1416 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
1417 } else {
1418 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
1419 }
1420 break;
1421 }
1422
1423 case TSO:
1424 {
1425 scavengeTSO((StgTSO*)p);
1426 break;
1427 }
1428
1429 case STACK:
1430 {
1431 StgStack *stack = (StgStack*)p;
1432
1433 gct->eager_promotion = false;
1434
1435 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1436 stack->dirty = gct->failed_to_evac;
1437
1438 gct->eager_promotion = saved_eager_promotion;
1439 break;
1440 }
1441
1442 case MUT_PRIM:
1443 {
1444 StgPtr end;
1445
1446 gct->eager_promotion = false;
1447
1448 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1449 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1450 evacuate((StgClosure **)p);
1451 }
1452
1453 gct->eager_promotion = saved_eager_promotion;
1454 gct->failed_to_evac = true; // mutable
1455 break;
1456
1457 }
1458
1459 case TREC_CHUNK:
1460 {
1461 StgWord i;
1462 StgTRecChunk *tc = ((StgTRecChunk *) p);
1463 TRecEntry *e = &(tc -> entries[0]);
1464 gct->eager_promotion = false;
1465 evacuate((StgClosure **)&tc->prev_chunk);
1466 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1467 evacuate((StgClosure **)&e->tvar);
1468 evacuate((StgClosure **)&e->expected_value);
1469 evacuate((StgClosure **)&e->new_value);
1470 }
1471 gct->eager_promotion = saved_eager_promotion;
1472 gct->failed_to_evac = true; // mutable
1473 break;
1474 }
1475
1476 case IND:
1477 // IND can happen, for example, when the interpreter allocates
1478 // a gigantic AP closure (more than one block), which ends up
1479 // on the large-object list and then gets updated. See #3424.
1480 case BLACKHOLE:
1481 case IND_STATIC:
1482 evacuate(&((StgInd *)p)->indirectee);
1483
1484 #if 0 && defined(DEBUG)
1485 if (RtsFlags.DebugFlags.gc)
1486 /* Debugging code to print out the size of the thing we just
1487 * promoted
1488 */
1489 {
1490 StgPtr start = gen->scan;
1491 bdescr *start_bd = gen->scan_bd;
1492 StgWord size = 0;
1493 scavenge(&gen);
1494 if (start_bd != gen->scan_bd) {
1495 size += (P_)BLOCK_ROUND_UP(start) - start;
1496 start_bd = start_bd->link;
1497 while (start_bd != gen->scan_bd) {
1498 size += BLOCK_SIZE_W;
1499 start_bd = start_bd->link;
1500 }
1501 size += gen->scan -
1502 (P_)BLOCK_ROUND_DOWN(gen->scan);
1503 } else {
1504 size = gen->scan - start;
1505 }
1506 debugBelch("evac IND: %ld bytes", size * sizeof(W_));
1507 }
1508 #endif
1509 break;
1510
1511 case COMPACT_NFDATA:
1512 scavenge_compact((StgCompactNFData*)p);
1513 break;
1514
1515 default:
1516 barf("scavenge_one: strange object %d", (int)(info->type));
1517 }
1518
1519 no_luck = gct->failed_to_evac;
1520 gct->failed_to_evac = false;
1521 return (no_luck);
1522 }
1523
1524 /* -----------------------------------------------------------------------------
1525 Scavenging mutable lists.
1526
1527 We treat the mutable list of each generation > N (i.e. all the
1528 generations older than the one being collected) as roots. We also
1529 remove non-mutable objects from the mutable list at this point.
1530 -------------------------------------------------------------------------- */
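// Each mutable list is a chain of blocks whose payloads are plain
// arrays of pointers (bd->start .. bd->free) to the objects recorded as
// mutable since the previous GC.  A minimal sketch of how one entry is
// consumed (using the same names as the loop below):
//
//     p = (StgPtr)*q;                 // q walks the array of entries
//     if (scavenge_one(p)) {          // still points into a younger gen?
//         recordMutableGen_GC((StgClosure *)p, gen_no);  // keep it
//     }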
1531
1532 static void
1533 scavenge_mutable_list(bdescr *bd, generation *gen)
1534 {
1535 StgPtr p, q;
1536 uint32_t gen_no;
1537
1538 gen_no = gen->no;
1539 gct->evac_gen_no = gen_no;
1540 for (; bd != NULL; bd = bd->link) {
1541 for (q = bd->start; q < bd->free; q++) {
1542 p = (StgPtr)*q;
1543 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1544
1545 #if defined(DEBUG)
1546 switch (get_itbl((StgClosure *)p)->type) {
1547 case MUT_VAR_CLEAN:
1548 // can happen due to concurrent writeMutVars
1549 case MUT_VAR_DIRTY:
1550 mutlist_MUTVARS++; break;
1551 case MUT_ARR_PTRS_CLEAN:
1552 case MUT_ARR_PTRS_DIRTY:
1553 case MUT_ARR_PTRS_FROZEN_CLEAN:
1554 case MUT_ARR_PTRS_FROZEN_DIRTY:
1555 mutlist_MUTARRS++; break;
1556 case MVAR_CLEAN:
1557 barf("MVAR_CLEAN on mutable list");
1558 case MVAR_DIRTY:
1559 mutlist_MVARS++; break;
1560 case TVAR:
1561 mutlist_TVAR++; break;
1562 case TREC_CHUNK:
1563 mutlist_TREC_CHUNK++; break;
1564 case MUT_PRIM:
1565 if (((StgClosure*)p)->header.info == &stg_TVAR_WATCH_QUEUE_info)
1566 mutlist_TVAR_WATCH_QUEUE++;
1567 else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
1568 mutlist_TREC_HEADER++;
1569 else
1570 mutlist_OTHERS++;
1571 break;
1572 default:
1573 mutlist_OTHERS++; break;
1574 }
1575 #endif
1576
1577 // Check whether this object is "clean", that is, it
1578 // definitely doesn't point into a young generation.
1579 // Clean objects don't need to be scavenged. Some clean
1580 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1581 // list at all; others, such as MUT_ARR_PTRS, are
1582 // always on the mutable list.
1583 //
1584 switch (get_itbl((StgClosure *)p)->type) {
1585 case MUT_ARR_PTRS_CLEAN:
1586 case SMALL_MUT_ARR_PTRS_CLEAN:
1587 recordMutableGen_GC((StgClosure *)p,gen_no);
1588 continue;
1589 case MUT_ARR_PTRS_DIRTY:
1590 {
1591 bool saved_eager_promotion;
1592 saved_eager_promotion = gct->eager_promotion;
1593 gct->eager_promotion = false;
1594
1595 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1596
1597 if (gct->failed_to_evac) {
1598 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1599 } else {
1600 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1601 }
1602
1603 gct->eager_promotion = saved_eager_promotion;
1604 gct->failed_to_evac = false;
1605 recordMutableGen_GC((StgClosure *)p,gen_no);
1606 continue;
1607 }
1608 default:
1609 ;
1610 }
1611
1612 if (scavenge_one(p)) {
1613 // didn't manage to promote everything, so put the
1614 // object back on the list.
1615 recordMutableGen_GC((StgClosure *)p,gen_no);
1616 }
1617 }
1618 }
1619 }
1620
1621 void
1622 scavenge_capability_mut_lists (Capability *cap)
1623 {
1624 uint32_t g;
1625
1626 /* Mutable lists from each generation > N
1627 * we want to *scavenge* these roots, not evacuate them: they're not
1628 * going to move in this GC.
1629 * Also do them in reverse generation order, for the usual reason:
1630 * namely to reduce the likelihood of spurious old->new pointers.
1631 */
1632 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1633 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1634 freeChain_sync(cap->saved_mut_lists[g]);
1635 cap->saved_mut_lists[g] = NULL;
1636 }
1637 }
1638
1639 /* -----------------------------------------------------------------------------
1640 Scavenging the static objects.
1641
1642 During a major GC, static objects that have been evacuated so far sit
1643 on the gct->static_objects list; we scavenge each one, evacuating its
1644 pointers, and move it onto gct->scavenged_static_objects.
1645 -------------------------------------------------------------------------- */
1646
1647 static void
1648 scavenge_static(void)
1649 {
1650 StgClosure *flagged_p, *p;
1651 const StgInfoTable *info;
1652
1653 debugTrace(DEBUG_gc, "scavenging static objects");
1654
1655 /* Always evacuate straight to the oldest generation for static
1656 * objects */
1657 gct->evac_gen_no = oldest_gen->no;
1658
1659 /* keep going until we've scavenged all the objects on the linked
1660 list... */
1661
1662 while (1) {
1663
1664 /* get the next static object from the list. Remember, there might
1665 * be more stuff on this list after each evacuation...
1666 * (the list is per GC thread: gct->static_objects)
1667 */
1668 flagged_p = gct->static_objects;
1669 if (flagged_p == END_OF_STATIC_OBJECT_LIST) {
1670 break;
1671 }
1672 p = UNTAG_STATIC_LIST_PTR(flagged_p);
1673
1674 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1675 info = get_itbl(p);
1676 // make sure the info pointer is into text space
1677
1678 /* Take this object *off* the static_objects list,
1679 * and put it on the scavenged_static_objects list.
1680 */
1681 gct->static_objects = *STATIC_LINK(info,p);
1682 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1683 gct->scavenged_static_objects = flagged_p;
1684
1685 switch (info -> type) {
1686
1687 case IND_STATIC:
1688 {
1689 StgInd *ind = (StgInd *)p;
1690 evacuate(&ind->indirectee);
1691
1692 /* might fail to evacuate it, in which case we have to pop it
1693 * back on the mutable list of the oldest generation. We
1694 * leave it *on* the scavenged_static_objects list, though,
1695 * in case we visit this object again.
1696 */
1697 if (gct->failed_to_evac) {
1698 gct->failed_to_evac = false;
1699 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1700 }
1701 break;
1702 }
1703
1704 case THUNK_STATIC:
1705 scavenge_thunk_srt(info);
1706 break;
1707
1708 case FUN_STATIC:
1709 scavenge_fun_srt(info);
1710 break;
1711
1712 case CONSTR:
1713 case CONSTR_NOCAF:
1714 case CONSTR_1_0:
1715 case CONSTR_0_1:
1716 case CONSTR_2_0:
1717 case CONSTR_1_1:
1718 case CONSTR_0_2:
1719 {
1720 StgPtr q, next;
1721
1722 next = (P_)p->payload + info->layout.payload.ptrs;
1723 // evacuate the pointers
1724 for (q = (P_)p->payload; q < next; q++) {
1725 evacuate((StgClosure **)q);
1726 }
1727 break;
1728 }
1729
1730 default:
1731 barf("scavenge_static: strange closure %d", (int)(info->type));
1732 }
1733
1734 ASSERT(gct->failed_to_evac == false);
1735 }
1736 }
1737
1738 /* -----------------------------------------------------------------------------
1739 scavenge a chunk of memory described by a bitmap
1740 -------------------------------------------------------------------------- */
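// bitmap[0] describes the first BITS_IN(W_) words at p, bitmap[1] the
// next BITS_IN(W_) words, and so on; as elsewhere in this file, a clear
// bit marks a pointer to be evacuated.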
1741
1742 static void
1743 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
1744 {
1745 uint32_t i, j, b;
1746 StgWord bitmap;
1747
1748 b = 0;
1749
1750 for (i = 0; i < size; b++) {
1751 bitmap = large_bitmap->bitmap[b];
1752 j = stg_min(size-i, BITS_IN(W_));
1753 i += j;
1754 for (; j > 0; j--, p++) {
1755 if ((bitmap & 1) == 0) {
1756 evacuate((StgClosure **)p);
1757 }
1758 bitmap = bitmap >> 1;
1759 }
1760 }
1761 }
1762
1763
1764 /* -----------------------------------------------------------------------------
1765 scavenge_stack walks over a section of stack and evacuates all the
1766 objects pointed to by it. We can use the same code for walking
1767 AP_STACK_UPDs, since these are just sections of copied stack.
1768 -------------------------------------------------------------------------- */
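// Stack layout reminder: the stack is a sequence of frames, each
// starting with an info pointer.  The frame's return-info table tells
// us how to walk its payload: a small inline bitmap (RET_SMALL,
// CATCH_FRAME, etc.), a large out-of-line bitmap (RET_BIG, RET_BCO), or
// a function argument layout (RET_FUN).  Throughout the loop, p points
// at the start of the current frame.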
1769
1770 static void
1771 scavenge_stack(StgPtr p, StgPtr stack_end)
1772 {
1773 const StgRetInfoTable* info;
1774 StgWord bitmap;
1775 StgWord size;
1776
1777 /*
1778 * Each time around this loop, we are looking at a chunk of stack
1779 * that starts with an activation record.
1780 */
1781
1782 while (p < stack_end) {
1783 info = get_ret_itbl((StgClosure *)p);
1784
1785 switch (info->i.type) {
1786
1787 case UPDATE_FRAME:
1788 // Note [upd-black-hole]
1789 //
1790 // In SMP, we can get update frames that point to indirections
1791 // when two threads evaluate the same thunk. We do attempt to
1792 // discover this situation in threadPaused(), but it's
1793 // possible that the following sequence occurs:
1794 //
1795 // A B
1796 // enter T
1797 // enter T
1798 // blackhole T
1799 // update T
1800 // GC
1801 //
1802 // Now T is an indirection, and the update frame is already
1803 // marked on A's stack, so we won't traverse it again in
1804 // threadPaused(). We could traverse the whole stack again
1805 // before GC, but that would be too expensive.
1806 //
1807 // Scavenging this update frame as normal would be disastrous;
1808 // the indirection will be shorted out, and the updatee would
1809 // end up pointing to the value. The update code will then
1810 // overwrite the value, instead of the BLACKHOLE it is
1811 // expecting to write to.
1812 //
1813 // One way we could try to fix this is to detect when the
1814 // BLACKHOLE has been updated by another thread, and then
1815 // replace this update frame with a special frame that just
1816 // enters the value. But this introduces some other
1817 // complexities:
1818 //
1819 // - we must be careful to call checkBlockingQueues() in this
1820 // special frame, because we might otherwise miss wakeups
1821 // for threads that blocked on the original BLACKHOLE,
1822 // - we must spot this frame when we're stripping the stack in
1823 // raiseAsync() and raiseExceptionHelper(), and arrange to call
1824 // checkBlockingQueues() there too.
1825 //
1826 // This is hard to get right, indeed we previously got it
1827 // wrong (see #13751). So we now take a different approach:
1828 // always copy the BLACKHOLE, even if it is actually an
1829 // indirection. This way we keep the update frame, we're
1830 // guaranteed to still perform the update, and check for
1831 // missed wakeups even when stripping the stack in
1832 // raiseAsync() and raiseExceptionHelper(). This is also a
1833 // little more efficient, because evacuating a known BLACKHOLE
1834 // is faster than evacuating an unknown closure.
1835 //
1836 // NOTE: for the reasons above, blackholing (either lazy or
1837 // eager) is NOT optional. See also Note [avoiding
1838 // threadPaused] in Interpreter.c.
1839 //
1840 // There are a couple of alternative solutions:
1841 // - if we see an update frame that points to an indirection,
1842 // arrange to call checkBlockingQueues() on that thread
1843 // after GC.
1844 // - spot a BLOCKING_QUEUE that points to a value and
1845 // arrange to wake it up after the GC.
1846 //
1847 // These are more difficult to implement, requiring an extra
1848 // list to be maintained during GC. They also rely on more
1849 // subtle invariants than the solution implemented here.
1850 //
1851
1852 {
1853 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1854
1855 evacuate_BLACKHOLE(&frame->updatee);
1856 p += sizeofW(StgUpdateFrame);
1857 continue;
1858 }
1859
1860 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1861 case CATCH_STM_FRAME:
1862 case CATCH_RETRY_FRAME:
1863 case ATOMICALLY_FRAME:
1864 case UNDERFLOW_FRAME:
1865 case STOP_FRAME:
1866 case CATCH_FRAME:
1867 case RET_SMALL:
1868 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1869 size = BITMAP_SIZE(info->i.layout.bitmap);
1870 // NOTE: the payload starts immediately after the info-ptr; we
1871 // don't have an StgHeader in the same sense as a heap closure.
1872 p++;
1873 p = scavenge_small_bitmap(p, size, bitmap);
1874
1875 follow_srt:
1876 if (major_gc && info->i.srt) {
1877 StgClosure *srt = (StgClosure*)GET_SRT(info);
1878 evacuate(&srt);
1879 }
1880 continue;
1881
1882 case RET_BCO: {
1883 StgBCO *bco;
1884 StgWord size;
1885
1886 p++;
1887 evacuate((StgClosure **)p);
1888 bco = (StgBCO *)*p;
1889 p++;
1890 size = BCO_BITMAP_SIZE(bco);
1891 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1892 p += size;
1893 continue;
1894 }
1895
1896 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1897 case RET_BIG:
1898 {
1899 StgWord size;
1900
1901 size = GET_LARGE_BITMAP(&info->i)->size;
1902 p++;
1903 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1904 p += size;
1905 // and don't forget to follow the SRT
1906 goto follow_srt;
1907 }
1908
1909 case RET_FUN:
1910 {
1911 StgRetFun *ret_fun = (StgRetFun *)p;
1912 const StgFunInfoTable *fun_info;
1913
1914 evacuate(&ret_fun->fun);
1915 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1916 p = scavenge_arg_block(fun_info, ret_fun->payload);
1917 goto follow_srt;
1918 }
1919
1920 default:
1921 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1922 }
1923 }
1924 }
1925
1926 /*-----------------------------------------------------------------------------
1927 scavenge the large object list.
1928
1929 evac_gen_no is set from the workspace's generation; similar games
1930 are played with evac_gen_no as in scavenge_block() - see the comment
1931 above that function. Most large objects are (repeatedly) mutable, so
1932 most of the time evac_gen_no will be zero.
1933 --------------------------------------------------------------------------- */
1934
1935 static void
1936 scavenge_large (gen_workspace *ws)
1937 {
1938 bdescr *bd;
1939 StgPtr p;
1940
1941 gct->evac_gen_no = ws->gen->no;
1942
1943 bd = ws->todo_large_objects;
1944
1945 for (; bd != NULL; bd = ws->todo_large_objects) {
1946
1947 // take this object *off* the large objects list and put it on
1948 // the scavenged large objects list. This is so that we can
1949 // treat todo_large_objects as a stack and push new objects on
1950 // the front when evacuating.
1951 ws->todo_large_objects = bd->link;
1952
1953 ACQUIRE_SPIN_LOCK(&ws->gen->sync);
1954 if (bd->flags & BF_COMPACT) {
1955 dbl_link_onto(bd, &ws->gen->live_compact_objects);
1956 StgCompactNFData *str = ((StgCompactNFDataBlock*)bd->start)->owner;
1957 ws->gen->n_live_compact_blocks += str->totalW / BLOCK_SIZE_W;
1958 p = (StgPtr)str;
1959 } else {
1960 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
1961 ws->gen->n_scavenged_large_blocks += bd->blocks;
1962 p = bd->start;
1963 }
1964 RELEASE_SPIN_LOCK(&ws->gen->sync);
1965
1966 if (scavenge_one(p)) {
1967 if (ws->gen->no > 0) {
1968 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
1969 }
1970 }
1971
1972 // stats
1973 gct->scanned += closure_sizeW((StgClosure*)p);
1974 }
1975 }
1976
1977 /* ----------------------------------------------------------------------------
1978 Look for work to do.
1979
1980 We look for the oldest gen that has either a todo block that can
1981 be scanned, or a block of work on the global queue that we can
1982 scan.
1983
1984 It is important to take work from the *oldest* generation that
1985 has work available, because that minimizes the likelihood of
1986 evacuating objects into a young generation when they should have
1987 been eagerly promoted. This really does make a difference (the
1988 cacheprof benchmark is one that is affected).
1989
1990 We also want to scan the todo block if possible before grabbing
1991 work from the global queue, the reason being that we don't want to
1992 steal work from the global queue and starve other threads if there
1993 is other work we can usefully be doing.
1994 ------------------------------------------------------------------------- */
1995
1996 static bool
1997 scavenge_find_work (void)
1998 {
1999 int g;
2000 gen_workspace *ws;
2001 bool did_something, did_anything;
2002 bdescr *bd;
2003
2004 gct->scav_find_work++;
2005
2006 did_anything = false;
2007
2008 loop:
2009 did_something = false;
2010 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2011 ws = &gct->gens[g];
2012
2013 gct->scan_bd = NULL;
2014
2015 // If we have a scan block with some work to do,
2016 // scavenge everything up to the free pointer.
2017 if (ws->todo_bd->u.scan < ws->todo_free)
2018 {
2019 scavenge_block(ws->todo_bd);
2020 did_something = true;
2021 break;
2022 }
2023
2024 // If we have any large objects to scavenge, do them now.
2025 if (ws->todo_large_objects) {
2026 scavenge_large(ws);
2027 did_something = true;
2028 break;
2029 }
2030
2031 if ((bd = grab_local_todo_block(ws)) != NULL) {
2032 scavenge_block(bd);
2033 did_something = true;
2034 break;
2035 }
2036 }
2037
2038 if (did_something) {
2039 did_anything = true;
2040 goto loop;
2041 }
2042
2043 #if defined(THREADED_RTS)
2044 if (work_stealing) {
2045 // look for work to steal
2046 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2047 if ((bd = steal_todo_block(g)) != NULL) {
2048 scavenge_block(bd);
2049 did_something = true;
2050 break;
2051 }
2052 }
2053
2054 if (did_something) {
2055 did_anything = true;
2056 goto loop;
2057 }
2058 }
2059 #endif
2060
2061 // only return when there is no more work to do
2062
2063 return did_anything;
2064 }
2065
2066 /* ----------------------------------------------------------------------------
2067 Scavenge until we can't find anything more to scavenge.
2068 ------------------------------------------------------------------------- */
2069
2070 void
2071 scavenge_loop(void)
2072 {
2073 bool work_to_do;
2074
2075 loop:
2076 work_to_do = false;
2077
2078 // scavenge static objects
2079 if (major_gc && gct->static_objects != END_OF_STATIC_OBJECT_LIST) {
2080 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
2081 scavenge_static();
2082 }
2083
2084 // scavenge objects in compacted generation
2085 if (mark_stack_bd != NULL && !mark_stack_empty()) {
2086 scavenge_mark_stack();
2087 work_to_do = true;
2088 }
2089
2090 // Order is important here: we want to deal in full blocks as
2091 // much as possible, so go for global work in preference to
2092 // local work. Only if all the global work has been exhausted
2093 // do we start scavenging the fragments of blocks in the local
2094 // workspaces.
2095 if (scavenge_find_work()) goto loop;
2096
2097 if (work_to_do) goto loop;
2098 }