1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2003
4 *
5 * Generational garbage collector
6 *
7 * ---------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11 #include "RtsFlags.h"
12 #include "RtsUtils.h"
13 #include "Apply.h"
14 #include "OSThreads.h"
15 #include "Storage.h"
16 #include "LdvProfile.h"
17 #include "Updates.h"
18 #include "Stats.h"
19 #include "Schedule.h"
20 #include "Sanity.h"
21 #include "BlockAlloc.h"
22 #include "MBlock.h"
23 #include "ProfHeap.h"
24 #include "SchedAPI.h"
25 #include "Weak.h"
26 #include "Prelude.h"
27 #include "ParTicky.h" // ToDo: move into Rts.h
28 #include "GCCompact.h"
29 #include "RtsSignals.h"
30 #include "STM.h"
31 #if defined(GRAN) || defined(PAR)
32 # include "GranSimRts.h"
33 # include "ParallelRts.h"
34 # include "FetchMe.h"
35 # if defined(DEBUG)
36 # include "Printer.h"
37 # include "ParallelDebug.h"
38 # endif
39 #endif
40 #include "HsFFI.h"
41 #include "Linker.h"
42 #if defined(RTS_GTK_FRONTPANEL)
43 #include "FrontPanel.h"
44 #endif
45
46 #include "RetainerProfile.h"
47
48 #include <string.h>
49
50 // Turn off inlining when debugging - it obfuscates things
51 #ifdef DEBUG
52 # undef STATIC_INLINE
53 # define STATIC_INLINE static
54 #endif
55
56 /* STATIC OBJECT LIST.
57 *
58 * During GC:
59 * We maintain a linked list of static objects that are still live.
60 * The requirements for this list are:
61 *
62 * - we need to scan the list while adding to it, in order to
63 * scavenge all the static objects (in the same way that
64 * breadth-first scavenging works for dynamic objects).
65 *
66 * - we need to be able to tell whether an object is already on
67 * the list, to break loops.
68 *
69 * Each static object has a "static link field", which we use for
70 * linking objects on to the list. We use a stack-type list, consing
71 * objects on the front as they are added (this means that the
72 * scavenge phase is depth-first, not breadth-first, but that
73 * shouldn't matter).
74 *
75 * A separate list is kept for objects that have been scavenged
76 * already - this is so that we can zero all the marks afterwards.
77 *
78 * An object is on the list if its static link field is non-zero; this
79 * means that we have to mark the end of the list with '1', not NULL.
80 *
81 * Extra notes for generational GC:
82 *
83 * Each generation has a static object list associated with it. When
84 * collecting generations up to N, we treat the static object lists
85 * from generations > N as roots.
86 *
87 * We build up a static object list while collecting generations 0..N,
88 * which is then appended to the static object list of generation N+1.
89 */
90 static StgClosure* static_objects; // live static objects
91 StgClosure* scavenged_static_objects; // static objects scavenged so far
92
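#if 0
/* Illustrative sketch only (the real manipulation happens in evacuate()
 * below and in scavenge_static() elsewhere in this file): how a live
 * static object is consed onto static_objects via its static link field.
 * The function name is hypothetical and the block is not compiled.
 */
static void add_static_object_sketch (StgClosure *q)
{
    const StgInfoTable *info = get_itbl(q);

    // a NULL link field means "not on any list yet"
    if (*STATIC_LINK(info, q) == NULL) {
        *STATIC_LINK(info, q) = static_objects;  // cons onto the front
        static_objects = q;   // list is terminated by END_OF_STATIC_LIST, not NULL
    }
}
#endif
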
93 /* N is the oldest generation being collected, where the generations
94 * are numbered starting at 0. A major GC (indicated by the major_gc
95 * flag) is when we're collecting all generations. We only attempt to
96 * deal with static objects and GC CAFs when doing a major GC.
97 */
98 static nat N;
99 static rtsBool major_gc;
100
101 /* Youngest generation that objects should be evacuated to in
102 * evacuate(). (Logically an argument to evacuate, but it's static
103 * a lot of the time so we optimise it into a global variable).
104 */
105 static nat evac_gen;
106
107 /* Whether to do eager promotion or not.
108 */
109 static rtsBool eager_promotion;
110
111 /* Weak pointers
112 */
113 StgWeak *old_weak_ptr_list; // also pending finaliser list
114
115 /* Which stage of processing various kinds of weak pointer are we at?
116 * (see traverse_weak_ptr_list() below for discussion).
117 */
118 typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
119 static WeakStage weak_stage;
120
121 /* List of all threads during GC
122 */
123 static StgTSO *old_all_threads;
124 StgTSO *resurrected_threads;
125
126 /* Flag indicating failure to evacuate an object to the desired
127 * generation.
128 */
129 static rtsBool failed_to_evac;
130
131 /* Saved nursery (used for 2-space collector only)
132 */
133 static bdescr *saved_nursery;
134 static nat saved_n_blocks;
135
136 /* Data used for allocation area sizing.
137 */
138 static lnat new_blocks; // blocks allocated during this GC
139 static lnat new_scavd_blocks; // ditto, but depth-first blocks
140 static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
141
142 /* Used to avoid long recursion due to selector thunks
143 */
144 static lnat thunk_selector_depth = 0;
145 #define MAX_THUNK_SELECTOR_DEPTH 8
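/* For example, a deeply nested chain of selector thunks such as
 * fst (fst (fst ... x)) would otherwise make eval_thunk_selector()
 * recurse once per level; past MAX_THUNK_SELECTOR_DEPTH levels we give up
 * and simply copy the selector thunk instead of evaluating it.
 */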
146
147 /* Mut-list stats */
148 #ifdef DEBUG
149 static nat
150 mutlist_MUTVARS,
151 mutlist_MUTARRS,
152 mutlist_OTHERS;
153 #endif
154
155 /* -----------------------------------------------------------------------------
156 Static function declarations
157 -------------------------------------------------------------------------- */
158
159 static bdescr * gc_alloc_block ( step *stp );
160 static void mark_root ( StgClosure **root );
161
162 // Use a register argument for evacuate, if available.
163 #if __GNUC__ >= 2
164 #define REGPARM1 __attribute__((regparm(1)))
165 #else
166 #define REGPARM1
167 #endif
168
169 REGPARM1 static StgClosure * evacuate (StgClosure *q);
170
171 static void zero_static_object_list ( StgClosure* first_static );
172
173 static rtsBool traverse_weak_ptr_list ( void );
174 static void mark_weak_ptr_list ( StgWeak **list );
175 static rtsBool traverse_blackhole_queue ( void );
176
177 static StgClosure * eval_thunk_selector ( nat field, StgSelector * p );
178
179
180 static void scavenge ( step * );
181 static void scavenge_mark_stack ( void );
182 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
183 static rtsBool scavenge_one ( StgPtr p );
184 static void scavenge_large ( step * );
185 static void scavenge_static ( void );
186 static void scavenge_mutable_list ( generation *g );
187
188 static void scavenge_large_bitmap ( StgPtr p,
189 StgLargeBitmap *large_bitmap,
190 nat size );
191
192 #if 0 && defined(DEBUG)
193 static void gcCAFs ( void );
194 #endif
195
196 /* -----------------------------------------------------------------------------
197 inline functions etc. for dealing with the mark bitmap & stack.
198 -------------------------------------------------------------------------- */
199
200 #define MARK_STACK_BLOCKS 4
201
202 static bdescr *mark_stack_bdescr;
203 static StgPtr *mark_stack;
204 static StgPtr *mark_sp;
205 static StgPtr *mark_splim;
206
207 // Flag and pointers used for falling back to a linear scan when the
208 // mark stack overflows.
209 static rtsBool mark_stack_overflowed;
210 static bdescr *oldgen_scan_bd;
211 static StgPtr oldgen_scan;
212
213 STATIC_INLINE rtsBool
214 mark_stack_empty(void)
215 {
216 return mark_sp == mark_stack;
217 }
218
219 STATIC_INLINE rtsBool
220 mark_stack_full(void)
221 {
222 return mark_sp >= mark_splim;
223 }
224
225 STATIC_INLINE void
226 reset_mark_stack(void)
227 {
228 mark_sp = mark_stack;
229 }
230
231 STATIC_INLINE void
232 push_mark_stack(StgPtr p)
233 {
234 *mark_sp++ = p;
235 }
236
237 STATIC_INLINE StgPtr
238 pop_mark_stack(void)
239 {
240 return *--mark_sp;
241 }
242
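#if 0
/* Illustrative sketch only: scavenge_mark_stack() (defined later in this
 * file) drains the stack that evacuate() pushes onto for compacted steps,
 * roughly as below.  scavenge_one_object() is a hypothetical stand-in for
 * the real per-closure scavenging; the block is not compiled.
 */
static void drain_mark_stack_sketch (void)
{
    StgPtr p;

    while (!mark_stack_empty()) {
        p = pop_mark_stack();
        // scavenge_one_object(p);   // hypothetical: scavenging may in turn
        //                           // evacuate children, pushing more work
    }
}
#endif
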
243 /* -----------------------------------------------------------------------------
244 Allocate a new to-space block in the given step.
245 -------------------------------------------------------------------------- */
246
247 static bdescr *
248 gc_alloc_block(step *stp)
249 {
250 bdescr *bd = allocBlock();
251 bd->gen_no = stp->gen_no;
252 bd->step = stp;
253 bd->link = NULL;
254
255 // blocks in to-space in generations up to and including N
256 // get the BF_EVACUATED flag.
257 if (stp->gen_no <= N) {
258 bd->flags = BF_EVACUATED;
259 } else {
260 bd->flags = 0;
261 }
262
263 // Start a new to-space block, chain it on after the previous one.
264 if (stp->hp_bd != NULL) {
265 stp->hp_bd->free = stp->hp;
266 stp->hp_bd->link = bd;
267 }
268
269 stp->hp_bd = bd;
270 stp->hp = bd->start;
271 stp->hpLim = stp->hp + BLOCK_SIZE_W;
272
273 stp->n_blocks++;
274 new_blocks++;
275
276 return bd;
277 }
278
279 static bdescr *
280 gc_alloc_scavd_block(step *stp)
281 {
282 bdescr *bd = allocBlock();
283 bd->gen_no = stp->gen_no;
284 bd->step = stp;
285
286 // blocks in to-space in generations up to and including N
287 // get the BF_EVACUATED flag.
288 if (stp->gen_no <= N) {
289 bd->flags = BF_EVACUATED;
290 } else {
291 bd->flags = 0;
292 }
293
294 bd->link = stp->blocks;
295 stp->blocks = bd;
296
297 if (stp->scavd_hp != NULL) {
298 Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
299 }
300 stp->scavd_hp = bd->start;
301 stp->scavd_hpLim = stp->scavd_hp + BLOCK_SIZE_W;
302
303 stp->n_blocks++;
304 new_scavd_blocks++;
305
306 return bd;
307 }
308
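#if 0
/* Illustrative sketch only: this is the bump-allocation pattern that
 * copy() and copyPart() (below) use on top of gc_alloc_block().  The
 * function name is hypothetical and the block is not compiled.
 */
static StgPtr to_space_alloc_sketch (step *stp, nat size_in_words)
{
    StgPtr to;

    // chain on a fresh to-space block if the current one is full
    if (stp->hp + size_in_words >= stp->hpLim) {
        gc_alloc_block(stp);
    }
    to = stp->hp;
    stp->hp += size_in_words;
    return to;
}
#endif
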
309 /* -----------------------------------------------------------------------------
310 GarbageCollect
311
312 Rough outline of the algorithm: for garbage collecting generation N
313 (and all younger generations):
314
315 - follow all pointers in the root set. the root set includes all
316 mutable objects in all generations (mutable_list).
317
318 - for each pointer, evacuate the object it points to into either
319
320 + to-space of the step given by step->to, which is the next
321 highest step in this generation or the first step in the next
322 generation if this is the last step.
323
324 + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
325 When we evacuate an object we attempt to evacuate
326 everything it points to into the same generation - this is
327 achieved by setting evac_gen to the desired generation. If
328 we can't do this, then an entry in the mut list has to
329 be made for the cross-generation pointer.
330
331 + if the object is already in a generation > N, then leave
332 it alone.
333
334 - repeatedly scavenge to-space from each step in each generation
335 being collected until no more objects can be evacuated.
336
337 - free from-space in each step, and set from-space = to-space.
338
339 Locks held: all capabilities are held throughout GarbageCollect().
340
341 -------------------------------------------------------------------------- */
342
343 void
344 GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
345 {
346 bdescr *bd;
347 step *stp;
348 lnat live, allocated, copied = 0, scavd_copied = 0;
349 lnat oldgen_saved_blocks = 0;
350 nat g, s, i;
351
352 ACQUIRE_SM_LOCK;
353
354 #ifdef PROFILING
355 CostCentreStack *prev_CCS;
356 #endif
357
358 #if defined(DEBUG) && defined(GRAN)
359 IF_DEBUG(gc, debugBelch("@@ Starting garbage collection at %ld (%lx)\n",
360 Now, Now));
361 #endif
362
363 #if defined(RTS_USER_SIGNALS)
364 // block signals
365 blockUserSignals();
366 #endif
367
368 // tell the STM to discard any cached closures it's hoping to re-use
369 stmPreGCHook();
370
371 // tell the stats department that we've started a GC
372 stat_startGC();
373
374 #ifdef DEBUG
375 // check for memory leaks if DEBUG is on
376 memInventory();
377 #endif
378
379 #ifdef DEBUG
380 mutlist_MUTVARS = 0;
381 mutlist_MUTARRS = 0;
382 mutlist_OTHERS = 0;
383 #endif
384
385 // Init stats and print par specific (timing) info
386 PAR_TICKY_PAR_START();
387
388 // attribute any costs to CCS_GC
389 #ifdef PROFILING
390 prev_CCS = CCCS;
391 CCCS = CCS_GC;
392 #endif
393
394 /* Approximate how much we allocated.
395 * Todo: only when generating stats?
396 */
397 allocated = calcAllocated();
398
399 /* Figure out which generation to collect
400 */
401 if (force_major_gc) {
402 N = RtsFlags.GcFlags.generations - 1;
403 major_gc = rtsTrue;
404 } else {
405 N = 0;
406 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
407 if (generations[g].steps[0].n_blocks +
408 generations[g].steps[0].n_large_blocks
409 >= generations[g].max_blocks) {
410 N = g;
411 }
412 }
413 major_gc = (N == RtsFlags.GcFlags.generations-1);
414 }
415
416 #ifdef RTS_GTK_FRONTPANEL
417 if (RtsFlags.GcFlags.frontpanel) {
418 updateFrontPanelBeforeGC(N);
419 }
420 #endif
421
422 // check stack sanity *before* GC (ToDo: check all threads)
423 #if defined(GRAN)
424 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
425 #endif
426 IF_DEBUG(sanity, checkFreeListSanity());
427
428 /* Initialise the static object lists
429 */
430 static_objects = END_OF_STATIC_LIST;
431 scavenged_static_objects = END_OF_STATIC_LIST;
432
433 /* Save the nursery if we're doing a two-space collection.
434 * g0s0->blocks will be used for to-space, so we need to get the
435 * nursery out of the way.
436 */
437 if (RtsFlags.GcFlags.generations == 1) {
438 saved_nursery = g0s0->blocks;
439 saved_n_blocks = g0s0->n_blocks;
440 g0s0->blocks = NULL;
441 g0s0->n_blocks = 0;
442 }
443
444 /* Keep a count of how many new blocks we allocated during this GC
445 * (used for resizing the allocation area, later).
446 */
447 new_blocks = 0;
448 new_scavd_blocks = 0;
449
450 // Initialise to-space in all the generations/steps that we're
451 // collecting.
452 //
453 for (g = 0; g <= N; g++) {
454
455 // throw away the mutable list. Invariant: the mutable list
456 // always has at least one block; this means we can avoid a check for
457 // NULL in recordMutable().
458 if (g != 0) {
459 freeChain(generations[g].mut_list);
460 generations[g].mut_list = allocBlock();
461 for (i = 0; i < n_capabilities; i++) {
462 freeChain(capabilities[i].mut_lists[g]);
463 capabilities[i].mut_lists[g] = allocBlock();
464 }
465 }
466
467 for (s = 0; s < generations[g].n_steps; s++) {
468
469 // generation 0, step 0 doesn't need to-space
470 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
471 continue;
472 }
473
474 stp = &generations[g].steps[s];
475 ASSERT(stp->gen_no == g);
476
477 // start a new to-space for this step.
478 stp->old_blocks = stp->blocks;
479 stp->n_old_blocks = stp->n_blocks;
480
481 // allocate the first to-space block; extra blocks will be
482 // chained on as necessary.
483 stp->hp_bd = NULL;
484 bd = gc_alloc_block(stp);
485 stp->blocks = bd;
486 stp->n_blocks = 1;
487 stp->scan = bd->start;
488 stp->scan_bd = bd;
489
490 // allocate a block for "already scavenged" objects. This goes
491 // on the front of the stp->blocks list, so it won't be
492 // traversed by the scavenging sweep.
493 gc_alloc_scavd_block(stp);
494
495 // initialise the large object queues.
496 stp->new_large_objects = NULL;
497 stp->scavenged_large_objects = NULL;
498 stp->n_scavenged_large_blocks = 0;
499
500 // mark the large objects as not evacuated yet
501 for (bd = stp->large_objects; bd; bd = bd->link) {
502 bd->flags &= ~BF_EVACUATED;
503 }
504
505 // for a compacted step, we need to allocate the bitmap
506 if (stp->is_compacted) {
507 nat bitmap_size; // in bytes
508 bdescr *bitmap_bdescr;
509 StgWord *bitmap;
510
511 bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
512
513 if (bitmap_size > 0) {
514 bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
515 / BLOCK_SIZE);
516 stp->bitmap = bitmap_bdescr;
517 bitmap = bitmap_bdescr->start;
518
519 IF_DEBUG(gc, debugBelch("bitmap_size: %d, bitmap: %p",
520 bitmap_size, bitmap););
521
522 // don't forget to fill it with zeros!
523 memset(bitmap, 0, bitmap_size);
524
525 // For each block in this step, point to its bitmap from the
526 // block descriptor.
527 for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
528 bd->u.bitmap = bitmap;
529 bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
530
531 // Also at this point we set the BF_COMPACTED flag
532 // for this block. The invariant is that
533 // BF_COMPACTED is always unset, except during GC
534 // when it is set on those blocks which will be
535 // compacted.
536 bd->flags |= BF_COMPACTED;
537 }
538 }
539 }
540 }
541 }
542
543 /* make sure the older generations have at least one block to
544 * allocate into (this makes things easier for copy(), see below).
545 */
546 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
547 for (s = 0; s < generations[g].n_steps; s++) {
548 stp = &generations[g].steps[s];
549 if (stp->hp_bd == NULL) {
550 ASSERT(stp->blocks == NULL);
551 bd = gc_alloc_block(stp);
552 stp->blocks = bd;
553 stp->n_blocks = 1;
554 }
555 if (stp->scavd_hp == NULL) {
556 gc_alloc_scavd_block(stp);
557 stp->n_blocks++;
558 }
559 /* Set the scan pointer for older generations: remember we
560 * still have to scavenge objects that have been promoted. */
561 stp->scan = stp->hp;
562 stp->scan_bd = stp->hp_bd;
563 stp->new_large_objects = NULL;
564 stp->scavenged_large_objects = NULL;
565 stp->n_scavenged_large_blocks = 0;
566 }
567
568 /* Move the private mutable lists from each capability onto the
569 * main mutable list for the generation.
570 */
571 for (i = 0; i < n_capabilities; i++) {
572 for (bd = capabilities[i].mut_lists[g];
573 bd->link != NULL; bd = bd->link) {
574 /* nothing */
575 }
576 bd->link = generations[g].mut_list;
577 generations[g].mut_list = capabilities[i].mut_lists[g];
578 capabilities[i].mut_lists[g] = allocBlock();
579 }
580 }
581
582 /* Allocate a mark stack if we're doing a major collection.
583 */
584 if (major_gc) {
585 mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
586 mark_stack = (StgPtr *)mark_stack_bdescr->start;
587 mark_sp = mark_stack;
588 mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
589 } else {
590 mark_stack_bdescr = NULL;
591 }
592
593 eager_promotion = rtsTrue; // for now
594
595 /* -----------------------------------------------------------------------
596 * follow all the roots that we know about:
597 * - mutable lists from each generation > N
598 * we want to *scavenge* these roots, not evacuate them: they're not
599 * going to move in this GC.
600 * Also: do them in reverse generation order. This is because we
601 * often want to promote objects that are pointed to by older
602 * generations early, so we don't have to repeatedly copy them.
603 * Doing the generations in reverse order ensures that we don't end
604 * up in the situation where we want to evac an object to gen 3 and
605 * it has already been evaced to gen 2.
606 */
607 {
608 int st;
609 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
610 generations[g].saved_mut_list = generations[g].mut_list;
611 generations[g].mut_list = allocBlock();
612 // mut_list always has at least one block.
613 }
614
615 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
616 IF_PAR_DEBUG(verbose, printMutableList(&generations[g]));
617 scavenge_mutable_list(&generations[g]);
618 evac_gen = g;
619 for (st = generations[g].n_steps-1; st >= 0; st--) {
620 scavenge(&generations[g].steps[st]);
621 }
622 }
623 }
624
625 /* follow roots from the CAF list (used by GHCi)
626 */
627 evac_gen = 0;
628 markCAFs(mark_root);
629
630 /* follow all the roots that the application knows about.
631 */
632 evac_gen = 0;
633 get_roots(mark_root);
634
635 #if defined(PAR)
636 /* And don't forget to mark the TSO if we got here direct from
637 * Haskell! */
638 /* Not needed in a seq version?
639 if (CurrentTSO) {
640 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
641 }
642 */
643
644 // Mark the entries in the GALA table of the parallel system
645 markLocalGAs(major_gc);
646 // Mark all entries on the list of pending fetches
647 markPendingFetches(major_gc);
648 #endif
649
650 /* Mark the weak pointer list, and prepare to detect dead weak
651 * pointers.
652 */
653 mark_weak_ptr_list(&weak_ptr_list);
654 old_weak_ptr_list = weak_ptr_list;
655 weak_ptr_list = NULL;
656 weak_stage = WeakPtrs;
657
658 /* The all_threads list is like the weak_ptr_list.
659 * See traverse_weak_ptr_list() for the details.
660 */
661 old_all_threads = all_threads;
662 all_threads = END_TSO_QUEUE;
663 resurrected_threads = END_TSO_QUEUE;
664
665 /* Mark the stable pointer table.
666 */
667 markStablePtrTable(mark_root);
668
669 /* Mark the root pointer table.
670 */
671 markRootPtrTable(mark_root);
672
673 /* -------------------------------------------------------------------------
674 * Repeatedly scavenge all the areas we know about until there's no
675 * more scavenging to be done.
676 */
677 {
678 rtsBool flag;
679 loop:
680 flag = rtsFalse;
681
682 // scavenge static objects
683 if (major_gc && static_objects != END_OF_STATIC_LIST) {
684 IF_DEBUG(sanity, checkStaticObjects(static_objects));
685 scavenge_static();
686 }
687
688 /* When scavenging the older generations: Objects may have been
689 * evacuated from generations <= N into older generations, and we
690 * need to scavenge these objects. We're going to try to ensure that
691 * any evacuations that occur move the objects into at least the
692 * same generation as the object being scavenged, otherwise we
693 * have to create new entries on the mutable list for the older
694 * generation.
695 */
696
697 // scavenge each step in generations 0..maxgen
698 {
699 long gen;
700 int st;
701
702 loop2:
703 // scavenge objects in compacted generation
704 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
705 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
706 scavenge_mark_stack();
707 flag = rtsTrue;
708 }
709
710 for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
711 for (st = generations[gen].n_steps; --st >= 0; ) {
712 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
713 continue;
714 }
715 stp = &generations[gen].steps[st];
716 evac_gen = gen;
717 if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
718 scavenge(stp);
719 flag = rtsTrue;
720 goto loop2;
721 }
722 if (stp->new_large_objects != NULL) {
723 scavenge_large(stp);
724 flag = rtsTrue;
725 goto loop2;
726 }
727 }
728 }
729 }
730
731 // if any blackholes are alive, make the threads that wait on
732 // them alive too.
733 if (traverse_blackhole_queue())
734 flag = rtsTrue;
735
736 if (flag) { goto loop; }
737
738 // must be last... invariant is that everything is fully
739 // scavenged at this point.
740 if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
741 goto loop;
742 }
743 }
744
745 /* Update the pointers from the task list - these are
746 * treated as weak pointers because we want to allow a main thread
747 * to get a BlockedOnDeadMVar exception in the same way as any other
748 * thread. Note that the threads should all have been retained by
749 * GC by virtue of being on the all_threads list; we're just
750 * updating pointers here.
751 */
752 {
753 Task *task;
754 StgTSO *tso;
755 for (task = all_tasks; task != NULL; task = task->all_link) {
756 if (!task->stopped && task->tso) {
757 ASSERT(task->tso->bound == task);
758 tso = (StgTSO *) isAlive((StgClosure *)task->tso);
759 if (tso == NULL) {
760 barf("task %p: main thread %d has been GC'd",
761 #ifdef THREADED_RTS
762 (void *)task->id,
763 #else
764 (void *)task,
765 #endif
766 task->tso->id);
767 }
768 task->tso = tso;
769 }
770 }
771 }
772
773 #if defined(PAR)
774 // Reconstruct the Global Address tables used in GUM
775 rebuildGAtables(major_gc);
776 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
777 #endif
778
779 // Now see which stable names are still alive.
780 gcStablePtrTable();
781
782 // Tidy the end of the to-space chains
783 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
784 for (s = 0; s < generations[g].n_steps; s++) {
785 stp = &generations[g].steps[s];
786 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
787 ASSERT(Bdescr(stp->hp) == stp->hp_bd);
788 stp->hp_bd->free = stp->hp;
789 Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
790 }
791 }
792 }
793
794 #ifdef PROFILING
795 // We call processHeapClosureForDead() on every closure destroyed during
796 // the current garbage collection, so we invoke LdvCensusForDead().
797 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
798 || RtsFlags.ProfFlags.bioSelector != NULL)
799 LdvCensusForDead(N);
800 #endif
801
802 // NO MORE EVACUATION AFTER THIS POINT!
803 // Finally: compaction of the oldest generation.
804 if (major_gc && oldest_gen->steps[0].is_compacted) {
805 // save number of blocks for stats
806 oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
807 compact(get_roots);
808 }
809
810 IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
811
812 /* run through all the generations/steps and tidy up
813 */
814 copied = new_blocks * BLOCK_SIZE_W;
815 scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
816 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
817
818 if (g <= N) {
819 generations[g].collections++; // for stats
820 }
821
822 // Count the mutable list as bytes "copied" for the purposes of
823 // stats. Every mutable list is copied during every GC.
824 if (g > 0) {
825 nat mut_list_size = 0;
826 for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
827 mut_list_size += bd->free - bd->start;
828 }
829 copied += mut_list_size;
830
831 IF_DEBUG(gc, debugBelch("mut_list_size: %ld (%d vars, %d arrays, %d others)\n", mut_list_size * sizeof(W_), mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS));
832 }
833
834 for (s = 0; s < generations[g].n_steps; s++) {
835 bdescr *next;
836 stp = &generations[g].steps[s];
837
838 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
839 // stats information: how much we copied
840 if (g <= N) {
841 copied -= stp->hp_bd->start + BLOCK_SIZE_W -
842 stp->hp_bd->free;
843 scavd_copied -= (P_)(BLOCK_ROUND_UP(stp->scavd_hp)) - stp->scavd_hp;
844 }
845 }
846
847 // for generations we collected...
848 if (g <= N) {
849
850 /* free old memory and shift to-space into from-space for all
851 * the collected steps (except the allocation area). These
852 * freed blocks will probably be quickly recycled.
853 */
854 if (!(g == 0 && s == 0)) {
855 if (stp->is_compacted) {
856 // for a compacted step, just shift the new to-space
857 // onto the front of the now-compacted existing blocks.
858 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
859 bd->flags &= ~BF_EVACUATED; // now from-space
860 }
861 // tack the new blocks on the end of the existing blocks
862 if (stp->old_blocks != NULL) {
863 for (bd = stp->old_blocks; bd != NULL; bd = next) {
864 // NB. this step might not be compacted next
865 // time, so reset the BF_COMPACTED flags.
866 // They are set before GC if we're going to
867 // compact. (search for BF_COMPACTED above).
868 bd->flags &= ~BF_COMPACTED;
869 next = bd->link;
870 if (next == NULL) {
871 bd->link = stp->blocks;
872 }
873 }
874 stp->blocks = stp->old_blocks;
875 }
876 // add the new blocks to the block tally
877 stp->n_blocks += stp->n_old_blocks;
878 ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
879 } else {
880 freeChain(stp->old_blocks);
881 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
882 bd->flags &= ~BF_EVACUATED; // now from-space
883 }
884 }
885 stp->old_blocks = NULL;
886 stp->n_old_blocks = 0;
887 }
888
889 /* LARGE OBJECTS. The current live large objects are chained on
890 * scavenged_large, having been moved during garbage
891 * collection from large_objects. Any objects left on
892 * large_objects list are therefore dead, so we free them here.
893 */
894 for (bd = stp->large_objects; bd != NULL; bd = next) {
895 next = bd->link;
896 freeGroup(bd);
897 bd = next;
898 }
899
900 // update the count of blocks used by large objects
901 for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
902 bd->flags &= ~BF_EVACUATED;
903 }
904 stp->large_objects = stp->scavenged_large_objects;
905 stp->n_large_blocks = stp->n_scavenged_large_blocks;
906
907 } else {
908 // for older generations...
909
910 /* For older generations, we need to append the
911 * scavenged_large_object list (i.e. large objects that have been
912 * promoted during this GC) to the large_object list for that step.
913 */
914 for (bd = stp->scavenged_large_objects; bd; bd = next) {
915 next = bd->link;
916 bd->flags &= ~BF_EVACUATED;
917 dbl_link_onto(bd, &stp->large_objects);
918 }
919
920 // add the new blocks we promoted during this GC
921 stp->n_large_blocks += stp->n_scavenged_large_blocks;
922 }
923 }
924 }
925
926 /* Reset the sizes of the older generations when we do a major
927 * collection.
928 *
929 * CURRENT STRATEGY: make all generations except zero the same size.
930 * We have to stay within the maximum heap size, and leave a certain
931 * percentage of the maximum heap size available to allocate into.
932 */
933 if (major_gc && RtsFlags.GcFlags.generations > 1) {
934 nat live, size, min_alloc;
935 nat max = RtsFlags.GcFlags.maxHeapSize;
936 nat gens = RtsFlags.GcFlags.generations;
937
938 // live in the oldest generations
939 live = oldest_gen->steps[0].n_blocks +
940 oldest_gen->steps[0].n_large_blocks;
941
942 // default max size for all generations except zero
943 size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
944 RtsFlags.GcFlags.minOldGenSize);
945
946 // minimum size for generation zero
947 min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
948 RtsFlags.GcFlags.minAllocAreaSize);
949
950 // Auto-enable compaction when the residency reaches a
951 // certain percentage of the maximum heap size (default: 30%).
952 if (RtsFlags.GcFlags.generations > 1 &&
953 (RtsFlags.GcFlags.compact ||
954 (max > 0 &&
955 oldest_gen->steps[0].n_blocks >
956 (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
957 oldest_gen->steps[0].is_compacted = 1;
958 // debugBelch("compaction: on\n", live);
959 } else {
960 oldest_gen->steps[0].is_compacted = 0;
961 // debugBelch("compaction: off\n", live);
962 }
963
964 // if we're going to go over the maximum heap size, reduce the
965 // size of the generations accordingly. The calculation is
966 // different if compaction is turned on, because we don't need
967 // to double the space required to collect the old generation.
968 if (max != 0) {
969
970 // this test is necessary to ensure that the calculations
971 // below don't have any negative results - we're working
972 // with unsigned values here.
973 if (max < min_alloc) {
974 heapOverflow();
975 }
976
977 if (oldest_gen->steps[0].is_compacted) {
978 if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
979 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
980 }
981 } else {
982 if ( (size * (gens - 1) * 2) + min_alloc > max ) {
983 size = (max - min_alloc) / ((gens - 1) * 2);
984 }
985 }
986
987 if (size < live) {
988 heapOverflow();
989 }
990 }
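
      /* Worked example (illustrative numbers only): with gens = 3,
       * compaction off, max = 4096 blocks and min_alloc = 256 blocks, the
       * copying collector may need up to size*(gens-1)*2 + min_alloc
       * blocks, so size is capped at (4096 - 256) / 4 = 960 blocks.
       */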
991
992 #if 0
993 debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
994 min_alloc, size, max);
995 #endif
996
997 for (g = 0; g < gens; g++) {
998 generations[g].max_blocks = size;
999 }
1000 }
1001
1002 // Guess the amount of live data for stats.
1003 live = calcLive();
1004
1005 /* Free the small objects allocated via allocate(), since this will
1006 * all have been copied into G0S1 now.
1007 */
1008 if (small_alloc_list != NULL) {
1009 freeChain(small_alloc_list);
1010 }
1011 small_alloc_list = NULL;
1012 alloc_blocks = 0;
1013 alloc_Hp = NULL;
1014 alloc_HpLim = NULL;
1015 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
1016
1017 // Start a new pinned_object_block
1018 pinned_object_block = NULL;
1019
1020 /* Free the mark stack.
1021 */
1022 if (mark_stack_bdescr != NULL) {
1023 freeGroup(mark_stack_bdescr);
1024 }
1025
1026 /* Free any bitmaps.
1027 */
1028 for (g = 0; g <= N; g++) {
1029 for (s = 0; s < generations[g].n_steps; s++) {
1030 stp = &generations[g].steps[s];
1031 if (stp->bitmap != NULL) {
1032 freeGroup(stp->bitmap);
1033 stp->bitmap = NULL;
1034 }
1035 }
1036 }
1037
1038 /* Two-space collector:
1039 * Free the old to-space, and estimate the amount of live data.
1040 */
1041 if (RtsFlags.GcFlags.generations == 1) {
1042 nat blocks;
1043
1044 if (g0s0->old_blocks != NULL) {
1045 freeChain(g0s0->old_blocks);
1046 }
1047 for (bd = g0s0->blocks; bd != NULL; bd = bd->link) {
1048 bd->flags = 0; // now from-space
1049 }
1050 g0s0->old_blocks = g0s0->blocks;
1051 g0s0->n_old_blocks = g0s0->n_blocks;
1052 g0s0->blocks = saved_nursery;
1053 g0s0->n_blocks = saved_n_blocks;
1054
1055 /* For a two-space collector, we need to resize the nursery. */
1056
1057 /* set up a new nursery. Allocate a nursery size based on a
1058 * function of the amount of live data (by default a factor of 2)
1059 * Use the blocks from the old nursery if possible, freeing up any
1060 * left over blocks.
1061 *
1062 * If we get near the maximum heap size, then adjust our nursery
1063 * size accordingly. If the nursery is the same size as the live
1064 * data (L), then we need 3L bytes. We can reduce the size of the
1065 * nursery to bring the required memory down near 2L bytes.
1066 *
1067 * A normal 2-space collector would need 4L bytes to give the same
1068 * performance we get from 3L bytes, reducing to the same
1069 * performance at 2L bytes.
1070 */
1071 blocks = g0s0->n_old_blocks;
1072
1073 if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
1074 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
1075 RtsFlags.GcFlags.maxHeapSize ) {
1076 long adjusted_blocks; // signed on purpose
1077 int pc_free;
1078
1079 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
1080 IF_DEBUG(gc, debugBelch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
1081 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
1082 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
1083 heapOverflow();
1084 }
1085 blocks = adjusted_blocks;
1086
1087 } else {
1088 blocks *= RtsFlags.GcFlags.oldGenFactor;
1089 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
1090 blocks = RtsFlags.GcFlags.minAllocAreaSize;
1091 }
1092 }
1093 resizeNurseries(blocks);
1094
1095 } else {
1096 /* Generational collector:
1097 * If the user has given us a suggested heap size, adjust our
1098 * allocation area to make best use of the memory available.
1099 */
1100
1101 if (RtsFlags.GcFlags.heapSizeSuggestion) {
1102 long blocks;
1103 nat needed = calcNeeded(); // approx blocks needed at next GC
1104
1105 /* Guess how much will be live in generation 0 step 0 next time.
1106 * A good approximation is obtained by finding the
1107 * percentage of g0s0 that was live at the last minor GC.
1108 */
1109 if (N == 0) {
1110 g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
1111 }
1112
1113 /* Estimate a size for the allocation area based on the
1114 * information available. We might end up going slightly under
1115 * or over the suggested heap size, but we should be pretty
1116 * close on average.
1117 *
1118 * Formula: suggested - needed
1119 * ----------------------------
1120 * 1 + g0s0_pcnt_kept/100
1121 *
1122 * where 'needed' is the amount of memory needed at the next
1123 * collection for collecting all steps except g0s0.
1124 */
1125 blocks =
1126 (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
1127 (100 + (long)g0s0_pcnt_kept);
1128
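      /* Worked example (illustrative numbers only): with a suggested heap
       * size of 4096 blocks, needed = 1024 and g0s0_pcnt_kept = 30, this
       * gives ((4096 - 1024) * 100) / 130 = 2363 blocks (integer division)
       * for the allocation area.
       */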
1129 if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
1130 blocks = RtsFlags.GcFlags.minAllocAreaSize;
1131 }
1132
1133 resizeNurseries((nat)blocks);
1134
1135 } else {
1136 // we might have added extra large blocks to the nursery, so
1137 // resize back to minAllocAreaSize again.
1138 resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
1139 }
1140 }
1141
1142 // mark the garbage collected CAFs as dead
1143 #if 0 && defined(DEBUG) // doesn't work at the moment
1144 if (major_gc) { gcCAFs(); }
1145 #endif
1146
1147 #ifdef PROFILING
1148 // resetStaticObjectForRetainerProfiling() must be called before
1149 // zeroing below.
1150 resetStaticObjectForRetainerProfiling();
1151 #endif
1152
1153 // zero the scavenged static object list
1154 if (major_gc) {
1155 zero_static_object_list(scavenged_static_objects);
1156 }
1157
1158 // Reset the nursery
1159 resetNurseries();
1160
1161 // start any pending finalizers
1162 RELEASE_SM_LOCK;
1163 scheduleFinalizers(last_free_capability, old_weak_ptr_list);
1164 ACQUIRE_SM_LOCK;
1165
1166 // send exceptions to any threads which were about to die
1167 RELEASE_SM_LOCK;
1168 resurrectThreads(resurrected_threads);
1169 ACQUIRE_SM_LOCK;
1170
1171 // Update the stable pointer hash table.
1172 updateStablePtrTable(major_gc);
1173
1174 // check sanity after GC
1175 IF_DEBUG(sanity, checkSanity());
1176
1177 // extra GC trace info
1178 IF_DEBUG(gc, statDescribeGens());
1179
1180 #ifdef DEBUG
1181 // symbol-table based profiling
1182 /* heapCensus(to_blocks); */ /* ToDo */
1183 #endif
1184
1185 // restore enclosing cost centre
1186 #ifdef PROFILING
1187 CCCS = prev_CCS;
1188 #endif
1189
1190 #ifdef DEBUG
1191 // check for memory leaks if DEBUG is on
1192 memInventory();
1193 #endif
1194
1195 #ifdef RTS_GTK_FRONTPANEL
1196 if (RtsFlags.GcFlags.frontpanel) {
1197 updateFrontPanelAfterGC( N, live );
1198 }
1199 #endif
1200
1201 // ok, GC over: tell the stats department what happened.
1202 stat_endGC(allocated, live, copied, scavd_copied, N);
1203
1204 #if defined(RTS_USER_SIGNALS)
1205 // unblock signals again
1206 unblockUserSignals();
1207 #endif
1208
1209 RELEASE_SM_LOCK;
1210
1211 //PAR_TICKY_TP();
1212 }
1213
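#if 0
/* Illustrative sketch only: how a caller might drive the collector.  The
 * names example_root, example_get_roots and example_force_major_gc are
 * hypothetical; in the real RTS the scheduler supplies its own roots
 * callback.  The block is not compiled.
 */
static StgClosure *example_root;

static void example_get_roots (evac_fn evac)
{
    evac(&example_root);        // evacuate each root the caller knows about
}

static void example_force_major_gc (void)
{
    GarbageCollect(example_get_roots, rtsTrue);   // collect all generations
}
#endif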
1214
1215 /* -----------------------------------------------------------------------------
1216 Weak Pointers
1217
1218 traverse_weak_ptr_list is called possibly many times during garbage
1219 collection. It returns a flag indicating whether it did any work
1220 (i.e. called evacuate on any live pointers).
1221
1222 Invariant: traverse_weak_ptr_list is called when the heap is in an
1223 quiescent state. That means that there are no pending
1224 evacuate/scavenge operations. This invariant helps the weak
1225 pointer code decide which weak pointers are dead - if there are no
1226 new live weak pointers, then all the currently unreachable ones are
1227 dead.
1228
1229 For generational GC: we just don't try to finalize weak pointers in
1230 older generations than the one we're collecting. This could
1231 probably be optimised by keeping per-generation lists of weak
1232 pointers, but for a few weak pointers this scheme will work.
1233
1234 There are three distinct stages to processing weak pointers:
1235
1236 - weak_stage == WeakPtrs
1237
1238 We process all the weak pointers whose keys are alive (evacuate
1239 their values and finalizers), and repeat until we can find no new
1240 live keys. If no live keys are found in this pass, then we
1241 evacuate the finalizers of all the dead weak pointers in order to
1242 run them.
1243
1244 - weak_stage == WeakThreads
1245
1246 Now, we discover which *threads* are still alive. Pointers to
1247 threads from the all_threads and main thread lists are the
1248 weakest of all: a pointer from the finalizer of a dead weak
1249 pointer can keep a thread alive. Any threads found to be unreachable
1250 are evacuated and placed on the resurrected_threads list so we
1251 can send them a signal later.
1252
1253 - weak_stage == WeakDone
1254
1255 No more evacuation is done.
1256
1257 -------------------------------------------------------------------------- */
1258
1259 static rtsBool
1260 traverse_weak_ptr_list(void)
1261 {
1262 StgWeak *w, **last_w, *next_w;
1263 StgClosure *new;
1264 rtsBool flag = rtsFalse;
1265
1266 switch (weak_stage) {
1267
1268 case WeakDone:
1269 return rtsFalse;
1270
1271 case WeakPtrs:
1272 /* doesn't matter where we evacuate values/finalizers to, since
1273 * these pointers are treated as roots (iff the keys are alive).
1274 */
1275 evac_gen = 0;
1276
1277 last_w = &old_weak_ptr_list;
1278 for (w = old_weak_ptr_list; w != NULL; w = next_w) {
1279
1280 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
1281 * called on a live weak pointer object. Just remove it.
1282 */
1283 if (w->header.info == &stg_DEAD_WEAK_info) {
1284 next_w = ((StgDeadWeak *)w)->link;
1285 *last_w = next_w;
1286 continue;
1287 }
1288
1289 switch (get_itbl(w)->type) {
1290
1291 case EVACUATED:
1292 next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
1293 *last_w = next_w;
1294 continue;
1295
1296 case WEAK:
1297 /* Now, check whether the key is reachable.
1298 */
1299 new = isAlive(w->key);
1300 if (new != NULL) {
1301 w->key = new;
1302 // evacuate the value and finalizer
1303 w->value = evacuate(w->value);
1304 w->finalizer = evacuate(w->finalizer);
1305 // remove this weak ptr from the old_weak_ptr list
1306 *last_w = w->link;
1307 // and put it on the new weak ptr list
1308 next_w = w->link;
1309 w->link = weak_ptr_list;
1310 weak_ptr_list = w;
1311 flag = rtsTrue;
1312 IF_DEBUG(weak, debugBelch("Weak pointer still alive at %p -> %p",
1313 w, w->key));
1314 continue;
1315 }
1316 else {
1317 last_w = &(w->link);
1318 next_w = w->link;
1319 continue;
1320 }
1321
1322 default:
1323 barf("traverse_weak_ptr_list: not WEAK");
1324 }
1325 }
1326
1327 /* If we didn't make any changes, then we can go round and kill all
1328 * the dead weak pointers. The old_weak_ptr list is used as a list
1329 * of pending finalizers later on.
1330 */
1331 if (flag == rtsFalse) {
1332 for (w = old_weak_ptr_list; w; w = w->link) {
1333 w->finalizer = evacuate(w->finalizer);
1334 }
1335
1336 // Next, move to the WeakThreads stage after fully
1337 // scavenging the finalizers we've just evacuated.
1338 weak_stage = WeakThreads;
1339 }
1340
1341 return rtsTrue;
1342
1343 case WeakThreads:
1344 /* Now deal with the all_threads list, which behaves somewhat like
1345 * the weak ptr list. If we discover any threads that are about to
1346 * become garbage, we wake them up and administer an exception.
1347 */
1348 {
1349 StgTSO *t, *tmp, *next, **prev;
1350
1351 prev = &old_all_threads;
1352 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1353
1354 tmp = (StgTSO *)isAlive((StgClosure *)t);
1355
1356 if (tmp != NULL) {
1357 t = tmp;
1358 }
1359
1360 ASSERT(get_itbl(t)->type == TSO);
1361 switch (t->what_next) {
1362 case ThreadRelocated:
1363 next = t->link;
1364 *prev = next;
1365 continue;
1366 case ThreadKilled:
1367 case ThreadComplete:
1368 // finished or died. The thread might still be alive, but we
1369 // don't keep it on the all_threads list. Don't forget to
1370 // stub out its global_link field.
1371 next = t->global_link;
1372 t->global_link = END_TSO_QUEUE;
1373 *prev = next;
1374 continue;
1375 default:
1376 ;
1377 }
1378
1379 if (tmp == NULL) {
1380 // not alive (yet): leave this thread on the
1381 // old_all_threads list.
1382 prev = &(t->global_link);
1383 next = t->global_link;
1384 }
1385 else {
1386 // alive: move this thread onto the all_threads list.
1387 next = t->global_link;
1388 t->global_link = all_threads;
1389 all_threads = t;
1390 *prev = next;
1391 }
1392 }
1393 }
1394
1395 /* If we evacuated any threads, we need to go back to the scavenger.
1396 */
1397 if (flag) return rtsTrue;
1398
1399 /* And resurrect any threads which were about to become garbage.
1400 */
1401 {
1402 StgTSO *t, *tmp, *next;
1403 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1404 next = t->global_link;
1405 tmp = (StgTSO *)evacuate((StgClosure *)t);
1406 tmp->global_link = resurrected_threads;
1407 resurrected_threads = tmp;
1408 }
1409 }
1410
1411 /* Finally, we can update the blackhole_queue. This queue
1412 * simply strings together TSOs blocked on black holes, it is
1413 * not intended to keep anything alive. Hence, we do not follow
1414 * pointers on the blackhole_queue until now, when we have
1415 * determined which TSOs are otherwise reachable. We know at
1416 * this point that all TSOs have been evacuated, however.
1417 */
1418 {
1419 StgTSO **pt;
1420 for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
1421 *pt = (StgTSO *)isAlive((StgClosure *)*pt);
1422 ASSERT(*pt != NULL);
1423 }
1424 }
1425
1426 weak_stage = WeakDone; // *now* we're done,
1427 return rtsTrue; // but one more round of scavenging, please
1428
1429 default:
1430 barf("traverse_weak_ptr_list");
1431 return rtsTrue;
1432 }
1433
1434 }
1435
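/* Illustrative only: GarbageCollect() above drives the stages by
 * re-scavenging until no work remains and then calling this function
 * again, roughly:
 *
 *     do {
 *         ... scavenge all pending to-space ...
 *     } while (traverse_weak_ptr_list());  // WeakPtrs -> WeakThreads -> WeakDone
 */
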
1436 /* -----------------------------------------------------------------------------
1437 The blackhole queue
1438
1439 Threads on this list behave like weak pointers during the normal
1440 phase of garbage collection: if the blackhole is reachable, then
1441 the thread is reachable too.
1442 -------------------------------------------------------------------------- */
1443 static rtsBool
1444 traverse_blackhole_queue (void)
1445 {
1446 StgTSO *prev, *t, *tmp;
1447 rtsBool flag;
1448
1449 flag = rtsFalse;
1450 prev = NULL;
1451
1452 for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->link) {
1453 if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
1454 if (isAlive(t->block_info.closure)) {
1455 t = (StgTSO *)evacuate((StgClosure *)t);
1456 if (prev) prev->link = t;
1457 flag = rtsTrue;
1458 }
1459 }
1460 }
1461 return flag;
1462 }
1463
1464 /* -----------------------------------------------------------------------------
1465 After GC, the live weak pointer list may have forwarding pointers
1466 on it, because a weak pointer object was evacuated after being
1467 moved to the live weak pointer list. We remove those forwarding
1468 pointers here.
1469
1470 Also, we don't consider weak pointer objects to be reachable, but
1471 we must nevertheless consider them to be "live" and retain them.
1472 Therefore any weak pointer objects which haven't as yet been
1473 evacuated need to be evacuated now.
1474 -------------------------------------------------------------------------- */
1475
1476
1477 static void
1478 mark_weak_ptr_list ( StgWeak **list )
1479 {
1480 StgWeak *w, **last_w;
1481
1482 last_w = list;
1483 for (w = *list; w; w = w->link) {
1484 // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
1485 ASSERT(w->header.info == &stg_DEAD_WEAK_info
1486 || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
1487 w = (StgWeak *)evacuate((StgClosure *)w);
1488 *last_w = w;
1489 last_w = &(w->link);
1490 }
1491 }
1492
1493 /* -----------------------------------------------------------------------------
1494 isAlive determines whether the given closure is still alive (after
1495 a garbage collection) or not. It returns the new address of the
1496 closure if it is alive, or NULL otherwise.
1497
1498 NOTE: Use it before compaction only!
1499 -------------------------------------------------------------------------- */
1500
1501
1502 StgClosure *
1503 isAlive(StgClosure *p)
1504 {
1505 const StgInfoTable *info;
1506 bdescr *bd;
1507
1508 while (1) {
1509
1510 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1511 info = get_itbl(p);
1512
1513 // ignore static closures
1514 //
1515 // ToDo: for static closures, check the static link field.
1516 // Problem here is that we sometimes don't set the link field, eg.
1517 // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
1518 //
1519 if (!HEAP_ALLOCED(p)) {
1520 return p;
1521 }
1522
1523 // ignore closures in generations that we're not collecting.
1524 bd = Bdescr((P_)p);
1525 if (bd->gen_no > N) {
1526 return p;
1527 }
1528
1529 // if it's a pointer into to-space, then we're done
1530 if (bd->flags & BF_EVACUATED) {
1531 return p;
1532 }
1533
1534 // large objects use the evacuated flag
1535 if (bd->flags & BF_LARGE) {
1536 return NULL;
1537 }
1538
1539 // check the mark bit for compacted steps
1540 if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
1541 return p;
1542 }
1543
1544 switch (info->type) {
1545
1546 case IND:
1547 case IND_STATIC:
1548 case IND_PERM:
1549 case IND_OLDGEN: // rely on compatible layout with StgInd
1550 case IND_OLDGEN_PERM:
1551 // follow indirections
1552 p = ((StgInd *)p)->indirectee;
1553 continue;
1554
1555 case EVACUATED:
1556 // alive!
1557 return ((StgEvacuated *)p)->evacuee;
1558
1559 case TSO:
1560 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1561 p = (StgClosure *)((StgTSO *)p)->link;
1562 continue;
1563 }
1564 return NULL;
1565
1566 default:
1567 // dead.
1568 return NULL;
1569 }
1570 }
1571 }
1572
1573 static void
1574 mark_root(StgClosure **root)
1575 {
1576 *root = evacuate(*root);
1577 }
1578
1579 STATIC_INLINE void
1580 upd_evacuee(StgClosure *p, StgClosure *dest)
1581 {
1582 // not true: (ToDo: perhaps it should be)
1583 // ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED);
1584 SET_INFO(p, &stg_EVACUATED_info);
1585 ((StgEvacuated *)p)->evacuee = dest;
1586 }
1587
1588
1589 STATIC_INLINE StgClosure *
1590 copy(StgClosure *src, nat size, step *stp)
1591 {
1592 StgPtr to, from;
1593 nat i;
1594 #ifdef PROFILING
1595 // @LDV profiling
1596 nat size_org = size;
1597 #endif
1598
1599 TICK_GC_WORDS_COPIED(size);
1600 /* Find out where we're going, using the handy "to" pointer in
1601 * the step of the source object. If it turns out we need to
1602 * evacuate to an older generation, adjust it here (see comment
1603 * by evacuate()).
1604 */
1605 if (stp->gen_no < evac_gen) {
1606 if (eager_promotion) {
1607 stp = &generations[evac_gen].steps[0];
1608 } else {
1609 failed_to_evac = rtsTrue;
1610 }
1611 }
1612
1613 /* chain a new block onto the to-space for the destination step if
1614 * necessary.
1615 */
1616 if (stp->hp + size >= stp->hpLim) {
1617 gc_alloc_block(stp);
1618 }
1619
1620 to = stp->hp;
1621 from = (StgPtr)src;
1622 stp->hp = to + size;
1623 for (i = 0; i < size; i++) { // unroll for small i
1624 to[i] = from[i];
1625 }
1626 upd_evacuee((StgClosure *)from,(StgClosure *)to);
1627
1628 #ifdef PROFILING
1629 // We store the size of the just evacuated object in the LDV word so that
1630 // the profiler can guess the position of the next object later.
1631 SET_EVACUAEE_FOR_LDV(from, size_org);
1632 #endif
1633 return (StgClosure *)to;
1634 }
1635
1636 // Same as copy() above, except the object will be allocated in memory
1637 // that will not be scavenged. Used for object that have no pointer
1638 // fields.
1639 STATIC_INLINE StgClosure *
1640 copy_noscav(StgClosure *src, nat size, step *stp)
1641 {
1642 StgPtr to, from;
1643 nat i;
1644 #ifdef PROFILING
1645 // @LDV profiling
1646 nat size_org = size;
1647 #endif
1648
1649 TICK_GC_WORDS_COPIED(size);
1650 /* Find out where we're going, using the handy "to" pointer in
1651 * the step of the source object. If it turns out we need to
1652 * evacuate to an older generation, adjust it here (see comment
1653 * by evacuate()).
1654 */
1655 if (stp->gen_no < evac_gen) {
1656 if (eager_promotion) {
1657 stp = &generations[evac_gen].steps[0];
1658 } else {
1659 failed_to_evac = rtsTrue;
1660 }
1661 }
1662
1663 /* chain a new block onto the to-space for the destination step if
1664 * necessary.
1665 */
1666 if (stp->scavd_hp + size >= stp->scavd_hpLim) {
1667 gc_alloc_scavd_block(stp);
1668 }
1669
1670 to = stp->scavd_hp;
1671 from = (StgPtr)src;
1672 stp->scavd_hp = to + size;
1673 for (i = 0; i < size; i++) { // unroll for small i
1674 to[i] = from[i];
1675 }
1676 upd_evacuee((StgClosure *)from,(StgClosure *)to);
1677
1678 #ifdef PROFILING
1679 // We store the size of the just evacuated object in the LDV word so that
1680 // the profiler can guess the position of the next object later.
1681 SET_EVACUAEE_FOR_LDV(from, size_org);
1682 #endif
1683 return (StgClosure *)to;
1684 }
1685
1686 /* Special version of copy() for when we only want to copy the info
1687 * pointer of an object, but reserve some padding after it. This is
1688 * used to optimise evacuation of BLACKHOLEs.
1689 */
1690
1691
1692 static StgClosure *
1693 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
1694 {
1695 P_ dest, to, from;
1696 #ifdef PROFILING
1697 // @LDV profiling
1698 nat size_to_copy_org = size_to_copy;
1699 #endif
1700
1701 TICK_GC_WORDS_COPIED(size_to_copy);
1702 if (stp->gen_no < evac_gen) {
1703 if (eager_promotion) {
1704 stp = &generations[evac_gen].steps[0];
1705 } else {
1706 failed_to_evac = rtsTrue;
1707 }
1708 }
1709
1710 if (stp->hp + size_to_reserve >= stp->hpLim) {
1711 gc_alloc_block(stp);
1712 }
1713
1714 for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
1715 *to++ = *from++;
1716 }
1717
1718 dest = stp->hp;
1719 stp->hp += size_to_reserve;
1720 upd_evacuee(src,(StgClosure *)dest);
1721 #ifdef PROFILING
1722 // We store the size of the just evacuated object in the LDV word so that
1723 // the profiler can guess the position of the next object later.
1724 // size_to_copy_org is wrong because the closure already occupies size_to_reserve
1725 // words.
1726 SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
1727 // fill the slop
1728 if (size_to_reserve - size_to_copy_org > 0)
1729 LDV_FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
1730 #endif
1731 return (StgClosure *)dest;
1732 }
1733
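/* Illustrative only: evacuate() uses copyPart() for closures such as
 * BLACKHOLEs, where only the header needs to be copied but the closure's
 * full size must stay reserved so the copy can later be updated in place;
 * a call looks roughly like
 *
 *     copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
 *
 * (sketch only -- assuming the usual BLACKHOLE_sizeW() size macro; the
 * exact sizes depend on the closure type being evacuated).
 */
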
1734
1735 /* -----------------------------------------------------------------------------
1736 Evacuate a large object
1737
1738 This just consists of removing the object from the (doubly-linked)
1739 step->large_objects list, and linking it on to the (singly-linked)
1740 step->new_large_objects list, from where it will be scavenged later.
1741
1742 Convention: bd->flags has BF_EVACUATED set for a large object
1743 that has been evacuated, or unset otherwise.
1744 -------------------------------------------------------------------------- */
1745
1746
1747 STATIC_INLINE void
1748 evacuate_large(StgPtr p)
1749 {
1750 bdescr *bd = Bdescr(p);
1751 step *stp;
1752
1753 // object must be at the beginning of the block (or be a ByteArray)
1754 ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
1755 (((W_)p & BLOCK_MASK) == 0));
1756
1757 // already evacuated?
1758 if (bd->flags & BF_EVACUATED) {
1759 /* Don't forget to set the failed_to_evac flag if we didn't get
1760 * the desired destination (see comments in evacuate()).
1761 */
1762 if (bd->gen_no < evac_gen) {
1763 failed_to_evac = rtsTrue;
1764 TICK_GC_FAILED_PROMOTION();
1765 }
1766 return;
1767 }
1768
1769 stp = bd->step;
1770 // remove from large_object list
1771 if (bd->u.back) {
1772 bd->u.back->link = bd->link;
1773 } else { // first object in the list
1774 stp->large_objects = bd->link;
1775 }
1776 if (bd->link) {
1777 bd->link->u.back = bd->u.back;
1778 }
1779
1780 /* link it on to the evacuated large object list of the destination step
1781 */
1782 stp = bd->step->to;
1783 if (stp->gen_no < evac_gen) {
1784 if (eager_promotion) {
1785 stp = &generations[evac_gen].steps[0];
1786 } else {
1787 failed_to_evac = rtsTrue;
1788 }
1789 }
1790
1791 bd->step = stp;
1792 bd->gen_no = stp->gen_no;
1793 bd->link = stp->new_large_objects;
1794 stp->new_large_objects = bd;
1795 bd->flags |= BF_EVACUATED;
1796 }
1797
1798 /* -----------------------------------------------------------------------------
1799 Evacuate
1800
1801 This is called (eventually) for every live object in the system.
1802
1803 The caller to evacuate specifies a desired generation in the
1804 evac_gen global variable. The following conditions apply to
1805 evacuating an object which resides in generation M when we're
1806 collecting up to generation N
1807
1808 if M >= evac_gen
1809 if M > N do nothing
1810 else evac to step->to
1811
1812 if M < evac_gen evac to evac_gen, step 0
1813
1814 if the object is already evacuated, then we check which generation
1815 it now resides in.
1816
1817 if M >= evac_gen do nothing
1818 if M < evac_gen set failed_to_evac flag to indicate that we
1819 didn't manage to evacuate this object into evac_gen.
1820
1821
1822 OPTIMISATION NOTES:
1823
1824 evacuate() is the single most important function performance-wise
1825 in the GC. Various things have been tried to speed it up, but as
1826 far as I can tell the code generated by gcc 3.2 with -O2 is about
1827 as good as it's going to get. We pass the argument to evacuate()
1828 in a register using the 'regparm' attribute (see the prototype for
1829 evacuate() near the top of this file).
1830
1831 Changing evacuate() to take an (StgClosure **) rather than
1832 returning the new pointer seems attractive, because we can avoid
1833 writing back the pointer when it hasn't changed (eg. for a static
1834 object, or an object in a generation > N). However, I tried it and
1835 it doesn't help. One reason is that the (StgClosure **) pointer
1836 gets spilled to the stack inside evacuate(), resulting in far more
1837 extra reads/writes than we save.
1838 -------------------------------------------------------------------------- */
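
/* Concrete example (illustrative): suppose we are collecting up to N = 1
 * and scavenging an object in generation 2, so evac_gen == 2.  A child in
 * generation 0 or 1 is copied to generation 2, step 0 (or, if eager
 * promotion is off, it goes to its normal to-step, failed_to_evac is set,
 * and the generation-2 parent ends up on the mutable list).  A child
 * already in generation 2 or older is left where it is.
 */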
1839
1840 REGPARM1 static StgClosure *
1841 evacuate(StgClosure *q)
1842 {
1843 #if defined(PAR)
1844 StgClosure *to;
1845 #endif
1846 bdescr *bd = NULL;
1847 step *stp;
1848 const StgInfoTable *info;
1849
1850 loop:
1851 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
1852
1853 if (!HEAP_ALLOCED(q)) {
1854
1855 if (!major_gc) return q;
1856
1857 info = get_itbl(q);
1858 switch (info->type) {
1859
1860 case THUNK_STATIC:
1861 if (info->srt_bitmap != 0 &&
1862 *THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1863 *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1864 static_objects = (StgClosure *)q;
1865 }
1866 return q;
1867
1868 case FUN_STATIC:
1869 if (info->srt_bitmap != 0 &&
1870 *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1871 *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1872 static_objects = (StgClosure *)q;
1873 }
1874 return q;
1875
1876 case IND_STATIC:
1877 /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
1878 * on the CAF list, so don't do anything with it here (we'll
1879 * scavenge it later).
1880 */
1881 if (((StgIndStatic *)q)->saved_info == NULL
1882 && *IND_STATIC_LINK((StgClosure *)q) == NULL) {
1883 *IND_STATIC_LINK((StgClosure *)q) = static_objects;
1884 static_objects = (StgClosure *)q;
1885 }
1886 return q;
1887
1888 case CONSTR_STATIC:
1889 if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
1890 *STATIC_LINK(info,(StgClosure *)q) = static_objects;
1891 static_objects = (StgClosure *)q;
1892 }
1893 return q;
1894
1895 case CONSTR_INTLIKE:
1896 case CONSTR_CHARLIKE:
1897 case CONSTR_NOCAF_STATIC:
1898 /* no need to put these on the static linked list, they don't need
1899 * to be scavenged.
1900 */
1901 return q;
1902
1903 default:
1904 barf("evacuate(static): strange closure type %d", (int)(info->type));
1905 }
1906 }
1907
1908 bd = Bdescr((P_)q);
1909
1910 if (bd->gen_no > N) {
1911 /* Can't evacuate this object, because it's in a generation
1912 * older than the ones we're collecting. Let's hope that it's
1913 * in evac_gen or older, or we will have to arrange to track
1914 * this pointer using the mutable list.
1915 */
1916 if (bd->gen_no < evac_gen) {
1917 // nope
1918 failed_to_evac = rtsTrue;
1919 TICK_GC_FAILED_PROMOTION();
1920 }
1921 return q;
1922 }
1923
1924 if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {
1925
1926 /* pointer into to-space: just return it. This normally
1927      * shouldn't happen, but allowing it makes certain things
1928 * slightly easier (eg. the mutable list can contain the same
1929 * object twice, for example).
1930 */
1931 if (bd->flags & BF_EVACUATED) {
1932 if (bd->gen_no < evac_gen) {
1933 failed_to_evac = rtsTrue;
1934 TICK_GC_FAILED_PROMOTION();
1935 }
1936 return q;
1937 }
1938
1939 /* evacuate large objects by re-linking them onto a different list.
1940 */
1941 if (bd->flags & BF_LARGE) {
1942 info = get_itbl(q);
1943 if (info->type == TSO &&
1944 ((StgTSO *)q)->what_next == ThreadRelocated) {
1945 q = (StgClosure *)((StgTSO *)q)->link;
1946 goto loop;
1947 }
1948 evacuate_large((P_)q);
1949 return q;
1950 }
1951
1952 /* If the object is in a step that we're compacting, then we
1953 * need to use an alternative evacuate procedure.
1954 */
1955 if (bd->flags & BF_COMPACTED) {
1956 if (!is_marked((P_)q,bd)) {
1957 mark((P_)q,bd);
1958 if (mark_stack_full()) {
1959 mark_stack_overflowed = rtsTrue;
1960 reset_mark_stack();
1961 }
1962 push_mark_stack((P_)q);
1963 }
1964 return q;
1965 }
1966 }
1967
1968 stp = bd->step->to;
1969
1970 info = get_itbl(q);
1971
1972 switch (info->type) {
1973
1974 case MUT_VAR_CLEAN:
1975 case MUT_VAR_DIRTY:
1976 case MVAR:
1977 return copy(q,sizeW_fromITBL(info),stp);
1978
1979 case CONSTR_0_1:
1980 {
1981 StgWord w = (StgWord)q->payload[0];
1982 if (q->header.info == Czh_con_info &&
1983 // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
1984 (StgChar)w <= MAX_CHARLIKE) {
1985 return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
1986 }
1987 if (q->header.info == Izh_con_info &&
1988 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
1989 return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
1990 }
1991 // else
1992 return copy_noscav(q,sizeofW(StgHeader)+1,stp);
1993 }
1994
1995 case FUN_0_1:
1996 case FUN_1_0:
1997 case CONSTR_1_0:
1998 return copy(q,sizeofW(StgHeader)+1,stp);
1999
2000 case THUNK_1_0:
2001 case THUNK_0_1:
2002 return copy(q,sizeofW(StgThunk)+1,stp);
2003
2004 case THUNK_1_1:
2005 case THUNK_2_0:
2006 case THUNK_0_2:
2007 #ifdef NO_PROMOTE_THUNKS
2008 if (bd->gen_no == 0 &&
2009 bd->step->no != 0 &&
2010 bd->step->no == generations[bd->gen_no].n_steps-1) {
2011 stp = bd->step;
2012 }
2013 #endif
2014 return copy(q,sizeofW(StgThunk)+2,stp);
2015
2016 case FUN_1_1:
2017 case FUN_2_0:
2018 case CONSTR_1_1:
2019 case CONSTR_2_0:
2020 case FUN_0_2:
2021 return copy(q,sizeofW(StgHeader)+2,stp);
2022
2023 case CONSTR_0_2:
2024 return copy_noscav(q,sizeofW(StgHeader)+2,stp);
2025
2026 case THUNK:
2027 return copy(q,thunk_sizeW_fromITBL(info),stp);
2028
2029 case FUN:
2030 case CONSTR:
2031 case IND_PERM:
2032 case IND_OLDGEN_PERM:
2033 case WEAK:
2034 case STABLE_NAME:
2035 return copy(q,sizeW_fromITBL(info),stp);
2036
2037 case BCO:
2038 return copy(q,bco_sizeW((StgBCO *)q),stp);
2039
2040 case CAF_BLACKHOLE:
2041 case SE_CAF_BLACKHOLE:
2042 case SE_BLACKHOLE:
2043 case BLACKHOLE:
2044 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
2045
2046 case THUNK_SELECTOR:
2047 {
2048 StgClosure *p;
2049 const StgInfoTable *info_ptr;
2050
2051 if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
2052 return copy(q,THUNK_SELECTOR_sizeW(),stp);
2053 }
2054
2055 // stashed away for LDV profiling, see below
2056 info_ptr = q->header.info;
2057
2058 p = eval_thunk_selector(info->layout.selector_offset,
2059 (StgSelector *)q);
2060
2061 if (p == NULL) {
2062 return copy(q,THUNK_SELECTOR_sizeW(),stp);
2063 } else {
2064 StgClosure *val;
2065 // q is still BLACKHOLE'd.
2066 thunk_selector_depth++;
2067 val = evacuate(p);
2068 thunk_selector_depth--;
2069
2070 #ifdef PROFILING
2071 // For the purposes of LDV profiling, we have destroyed
2072 // the original selector thunk.
2073 SET_INFO(q, info_ptr);
2074 LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(q);
2075 #endif
2076
2077 // Update the THUNK_SELECTOR with an indirection to the
2078 // EVACUATED closure now at p. Why do this rather than
2079 // upd_evacuee(q,p)? Because we have an invariant that an
2080 // EVACUATED closure always points to an object in the
2081 // same or an older generation (required by the short-cut
2082 // test in the EVACUATED case, below).
2083 SET_INFO(q, &stg_IND_info);
2084 ((StgInd *)q)->indirectee = p;
2085
2086 // For the purposes of LDV profiling, we have created an
2087 // indirection.
2088 LDV_RECORD_CREATE(q);
2089
2090 return val;
2091 }
2092 }
2093
2094 case IND:
2095 case IND_OLDGEN:
2096 // follow chains of indirections, don't evacuate them
2097 q = ((StgInd*)q)->indirectee;
2098 goto loop;
2099
2100 case RET_BCO:
2101 case RET_SMALL:
2102 case RET_VEC_SMALL:
2103 case RET_BIG:
2104 case RET_VEC_BIG:
2105 case RET_DYN:
2106 case UPDATE_FRAME:
2107 case STOP_FRAME:
2108 case CATCH_FRAME:
2109 case CATCH_STM_FRAME:
2110 case CATCH_RETRY_FRAME:
2111 case ATOMICALLY_FRAME:
2112 // shouldn't see these
2113 barf("evacuate: stack frame at %p\n", q);
2114
2115 case PAP:
2116 return copy(q,pap_sizeW((StgPAP*)q),stp);
2117
2118 case AP:
2119 return copy(q,ap_sizeW((StgAP*)q),stp);
2120
2121 case AP_STACK:
2122 return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);
2123
2124 case EVACUATED:
2125 /* Already evacuated, just return the forwarding address.
2126 * HOWEVER: if the requested destination generation (evac_gen) is
2127 * older than the actual generation (because the object was
2128 * already evacuated to a younger generation) then we have to
2129 * set the failed_to_evac flag to indicate that we couldn't
2130 * manage to promote the object to the desired generation.
2131 */
2132 /*
2133 * Optimisation: the check is fairly expensive, but we can often
2134 * shortcut it if either the required generation is 0, or the
2135 * current object (the EVACUATED) is in a high enough generation.
2136 * We know that an EVACUATED always points to an object in the
2137 * same or an older generation. stp is the lowest step that the
2138 * current object would be evacuated to, so we only do the full
2139 * check if stp is too low.
2140 */
2141 if (evac_gen > 0 && stp->gen_no < evac_gen) { // optimisation
2142 StgClosure *p = ((StgEvacuated*)q)->evacuee;
2143 if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
2144 failed_to_evac = rtsTrue;
2145 TICK_GC_FAILED_PROMOTION();
2146 }
2147 }
2148 return ((StgEvacuated*)q)->evacuee;
2149
2150 case ARR_WORDS:
2151 // just copy the block
2152 return copy_noscav(q,arr_words_sizeW((StgArrWords *)q),stp);
2153
2154 case MUT_ARR_PTRS_CLEAN:
2155 case MUT_ARR_PTRS_DIRTY:
2156 case MUT_ARR_PTRS_FROZEN:
2157 case MUT_ARR_PTRS_FROZEN0:
2158 // just copy the block
2159 return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
2160
2161 case TSO:
2162 {
2163 StgTSO *tso = (StgTSO *)q;
2164
2165 /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
2166 */
2167 if (tso->what_next == ThreadRelocated) {
2168 q = (StgClosure *)tso->link;
2169 goto loop;
2170 }
2171
2172 /* To evacuate a small TSO, we need to relocate the update frame
2173 * list it contains.
2174 */
2175 {
2176 StgTSO *new_tso;
2177 StgPtr p, q;
2178
2179 new_tso = (StgTSO *)copyPart((StgClosure *)tso,
2180 tso_sizeW(tso),
2181 sizeofW(StgTSO), stp);
2182 move_TSO(tso, new_tso);
2183 for (p = tso->sp, q = new_tso->sp;
2184 p < tso->stack+tso->stack_size;) {
2185 *q++ = *p++;
2186 }
2187
2188 return (StgClosure *)new_tso;
2189 }
2190 }
2191
2192 #if defined(PAR)
2193 case RBH:
2194 {
2195 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
2196 to = copy(q,BLACKHOLE_sizeW(),stp);
2197 //ToDo: derive size etc from reverted IP
2198 //to = copy(q,size,stp);
2199 IF_DEBUG(gc,
2200 debugBelch("@@ evacuate: RBH %p (%s) to %p (%s)",
2201 q, info_type(q), to, info_type(to)));
2202 return to;
2203 }
2204
2205 case BLOCKED_FETCH:
2206       ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
2207 to = copy(q,sizeofW(StgBlockedFetch),stp);
2208 IF_DEBUG(gc,
2209 debugBelch("@@ evacuate: %p (%s) to %p (%s)",
2210 q, info_type(q), to, info_type(to)));
2211 return to;
2212
2213 # ifdef DIST
2214 case REMOTE_REF:
2215 # endif
2216 case FETCH_ME:
2217       ASSERT(sizeofW(StgFetchMe) >= MIN_PAYLOAD_SIZE);
2218 to = copy(q,sizeofW(StgFetchMe),stp);
2219 IF_DEBUG(gc,
2220 debugBelch("@@ evacuate: %p (%s) to %p (%s)",
2221 q, info_type(q), to, info_type(to)));
2222 return to;
2223
2224 case FETCH_ME_BQ:
2225       ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_PAYLOAD_SIZE);
2226 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
2227 IF_DEBUG(gc,
2228 debugBelch("@@ evacuate: %p (%s) to %p (%s)",
2229 q, info_type(q), to, info_type(to)));
2230 return to;
2231 #endif
2232
2233 case TREC_HEADER:
2234 return copy(q,sizeofW(StgTRecHeader),stp);
2235
2236 case TVAR_WAIT_QUEUE:
2237 return copy(q,sizeofW(StgTVarWaitQueue),stp);
2238
2239 case TVAR:
2240 return copy(q,sizeofW(StgTVar),stp);
2241
2242 case TREC_CHUNK:
2243 return copy(q,sizeofW(StgTRecChunk),stp);
2244
2245 default:
2246 barf("evacuate: strange closure type %d", (int)(info->type));
2247 }
2248
2249 barf("evacuate");
2250 }
2251
2252 /* -----------------------------------------------------------------------------
2253 Evaluate a THUNK_SELECTOR if possible.
2254
2255 returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or
2256 a closure pointer if we evaluated it and this is the result. Note
2257 that "evaluating" the THUNK_SELECTOR doesn't necessarily mean
2258 reducing it to HNF, just that we have eliminated the selection.
2259 The result might be another thunk, or even another THUNK_SELECTOR.
2260
2261 If the return value is non-NULL, the original selector thunk has
2262 been BLACKHOLE'd, and should be updated with an indirection or a
2263 forwarding pointer. If the return value is NULL, then the selector
2264 thunk is unchanged.
2265
2266 ***
2267 ToDo: the treatment of THUNK_SELECTORS could be improved in the
2268 following way (from a suggestion by Ian Lynagh):
2269
2270 We can have a chain like this:
2271
2272 sel_0 --> (a,b)
2273 |
2274 |-----> sel_0 --> (a,b)
2275 |
2276 |-----> sel_0 --> ...
2277
2278 and the depth limit means we don't go all the way to the end of the
2279 chain, which results in a space leak. This affects the recursive
2280 call to evacuate() in the THUNK_SELECTOR case in evacuate(): *not*
2281 the recursive call to eval_thunk_selector() in
2282 eval_thunk_selector().
2283
2284 We could eliminate the depth bound in this case, in the following
2285 way:
2286
2287 - traverse the chain once to discover the *value* of the
2288 THUNK_SELECTOR. Mark all THUNK_SELECTORS that we
2289 visit on the way as having been visited already (somehow).
2290
2291 - in a second pass, traverse the chain again updating all
2292      THUNK_SELECTORS that we find on the way with indirections to
2293 the value.
2294
2295 - if we encounter a "marked" THUNK_SELECTOR in a normal
2296      evacuate(), we know it can't be updated so just evac it
2297
2298 Program that illustrates the problem:
2299
2300 foo [] = ([], [])
2301 foo (x:xs) = let (ys, zs) = foo xs
2302 in if x >= 0 then (x:ys, zs) else (ys, x:zs)
2303
2304 main = bar [1..(100000000::Int)]
2305 bar xs = (\(ys, zs) -> print ys >> print zs) (foo xs)
2306
2307 -------------------------------------------------------------------------- */
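/* Hedged sketch of the two-pass idea suggested above; it is not
 * implemented here.  select_once() is a hypothetical helper that does
 * one selection step if the selectee is already an evaluated
 * constructor and returns NULL otherwise; real code would also need
 * the to-space tests, marking and profiling hooks that
 * eval_thunk_selector() performs below.
 */
#if 0
static void
collapse_selector_chain (StgSelector *sel)
{
    StgClosure *val, *p, *next;

    // pass 1: walk the chain to find the value at the end
    val = (StgClosure *)sel;
    while (get_itbl(val)->type == THUNK_SELECTOR) {
        next = select_once((StgSelector *)val);
        if (next == NULL) return;        // can't evaluate; leave the chain alone
        val = next;
    }

    // pass 2: overwrite every selector on the chain with an indirection
    p = (StgClosure *)sel;
    while (get_itbl(p)->type == THUNK_SELECTOR) {
        next = select_once((StgSelector *)p);
        SET_INFO(p, &stg_IND_info);
        ((StgInd *)p)->indirectee = val;
        p = next;
    }
}
#endif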
2308
2309 static inline rtsBool
2310 is_to_space ( StgClosure *p )
2311 {
2312 bdescr *bd;
2313
2314 bd = Bdescr((StgPtr)p);
2315 if (HEAP_ALLOCED(p) &&
2316 ((bd->flags & BF_EVACUATED)
2317 || ((bd->flags & BF_COMPACTED) &&
2318 is_marked((P_)p,bd)))) {
2319 return rtsTrue;
2320 } else {
2321 return rtsFalse;
2322 }
2323 }
2324
2325 static StgClosure *
2326 eval_thunk_selector( nat field, StgSelector * p )
2327 {
2328 StgInfoTable *info;
2329 const StgInfoTable *info_ptr;
2330 StgClosure *selectee;
2331
2332 selectee = p->selectee;
2333
2334 // Save the real info pointer (NOTE: not the same as get_itbl()).
2335 info_ptr = p->header.info;
2336
2337 // If the THUNK_SELECTOR is in a generation that we are not
2338 // collecting, then bail out early. We won't be able to save any
2339 // space in any case, and updating with an indirection is trickier
2340 // in an old gen.
2341 if (Bdescr((StgPtr)p)->gen_no > N) {
2342 return NULL;
2343 }
2344
2345 // BLACKHOLE the selector thunk, since it is now under evaluation.
2346 // This is important to stop us going into an infinite loop if
2347 // this selector thunk eventually refers to itself.
2348 SET_INFO(p,&stg_BLACKHOLE_info);
2349
2350 selector_loop:
2351
2352 // We don't want to end up in to-space, because this causes
2353 // problems when the GC later tries to evacuate the result of
2354 // eval_thunk_selector(). There are various ways this could
2355 // happen:
2356 //
2357 // 1. following an IND_STATIC
2358 //
2359 // 2. when the old generation is compacted, the mark phase updates
2360 // from-space pointers to be to-space pointers, and we can't
2361 // reliably tell which we're following (eg. from an IND_STATIC).
2362 //
2363 // 3. compacting GC again: if we're looking at a constructor in
2364 // the compacted generation, it might point directly to objects
2365 // in to-space. We must bale out here, otherwise doing the selection
2366 // will result in a to-space pointer being returned.
2367 //
2368 // (1) is dealt with using a BF_EVACUATED test on the
2369 // selectee. (2) and (3): we can tell if we're looking at an
2370 // object in the compacted generation that might point to
2371 // to-space objects by testing that (a) it is BF_COMPACTED, (b)
2372 // the compacted generation is being collected, and (c) the
2373 // object is marked. Only a marked object may have pointers that
2374 // point to to-space objects, because that happens when
2375 // scavenging.
2376 //
2377      // The to-space test is now embodied in the is_to_space() inline
2378 // function, as it is re-used below.
2379 //
2380 if (is_to_space(selectee)) {
2381 goto bale_out;
2382 }
2383
2384 info = get_itbl(selectee);
2385 switch (info->type) {
2386 case CONSTR:
2387 case CONSTR_1_0:
2388 case CONSTR_0_1:
2389 case CONSTR_2_0:
2390 case CONSTR_1_1:
2391 case CONSTR_0_2:
2392 case CONSTR_STATIC:
2393 case CONSTR_NOCAF_STATIC:
2394 // check that the size is in range
2395 ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
2396 info->layout.payload.nptrs));
2397
2398 // Select the right field from the constructor, and check
2399 // that the result isn't in to-space. It might be in
2400 // to-space if, for example, this constructor contains
2401 // pointers to younger-gen objects (and is on the mut-once
2402 // list).
2403 //
2404 {
2405 StgClosure *q;
2406 q = selectee->payload[field];
2407 if (is_to_space(q)) {
2408 goto bale_out;
2409 } else {
2410 return q;
2411 }
2412 }
2413
2414 case IND:
2415 case IND_PERM:
2416 case IND_OLDGEN:
2417 case IND_OLDGEN_PERM:
2418 case IND_STATIC:
2419 selectee = ((StgInd *)selectee)->indirectee;
2420 goto selector_loop;
2421
2422 case EVACUATED:
2423 // We don't follow pointers into to-space; the constructor
2424 // has already been evacuated, so we won't save any space
2425 // leaks by evaluating this selector thunk anyhow.
2426 break;
2427
2428 case THUNK_SELECTOR:
2429 {
2430 StgClosure *val;
2431
2432 // check that we don't recurse too much, re-using the
2433 // depth bound also used in evacuate().
2434 if (thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
2435 break;
2436 }
2437 thunk_selector_depth++;
2438
2439 val = eval_thunk_selector(info->layout.selector_offset,
2440 (StgSelector *)selectee);
2441
2442 thunk_selector_depth--;
2443
2444 if (val == NULL) {
2445 break;
2446 } else {
2447 // We evaluated this selector thunk, so update it with
2448 // an indirection. NOTE: we don't use UPD_IND here,
2449 // because we are guaranteed that p is in a generation
2450 // that we are collecting, and we never want to put the
2451 // indirection on a mutable list.
2452 #ifdef PROFILING
2453 // For the purposes of LDV profiling, we have destroyed
2454 // the original selector thunk.
2455 SET_INFO(p, info_ptr);
2456 LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(selectee);
2457 #endif
2458 ((StgInd *)selectee)->indirectee = val;
2459 SET_INFO(selectee,&stg_IND_info);
2460
2461 // For the purposes of LDV profiling, we have created an
2462 // indirection.
2463 LDV_RECORD_CREATE(selectee);
2464
2465 selectee = val;
2466 goto selector_loop;
2467 }
2468 }
2469
2470 case AP:
2471 case AP_STACK:
2472 case THUNK:
2473 case THUNK_1_0:
2474 case THUNK_0_1:
2475 case THUNK_2_0:
2476 case THUNK_1_1:
2477 case THUNK_0_2:
2478 case THUNK_STATIC:
2479 case CAF_BLACKHOLE:
2480 case SE_CAF_BLACKHOLE:
2481 case SE_BLACKHOLE:
2482 case BLACKHOLE:
2483 #if defined(PAR)
2484 case RBH:
2485 case BLOCKED_FETCH:
2486 # ifdef DIST
2487 case REMOTE_REF:
2488 # endif
2489 case FETCH_ME:
2490 case FETCH_ME_BQ:
2491 #endif
2492 // not evaluated yet
2493 break;
2494
2495 default:
2496 barf("eval_thunk_selector: strange selectee %d",
2497 (int)(info->type));
2498 }
2499
2500 bale_out:
2501 // We didn't manage to evaluate this thunk; restore the old info pointer
2502 SET_INFO(p, info_ptr);
2503 return NULL;
2504 }
2505
2506 /* -----------------------------------------------------------------------------
2507 move_TSO is called to update the TSO structure after it has been
2508 moved from one place to another.
2509 -------------------------------------------------------------------------- */
2510
2511 void
2512 move_TSO (StgTSO *src, StgTSO *dest)
2513 {
2514 ptrdiff_t diff;
2515
2516 // relocate the stack pointer...
2517 diff = (StgPtr)dest - (StgPtr)src; // In *words*
2518 dest->sp = (StgPtr)dest->sp + diff;
2519 }
2520
2521 /* Similar to scavenge_large_bitmap(), but we don't write back the
2522 * pointers we get back from evacuate().
2523 */
2524 static void
2525 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
2526 {
2527 nat i, b, size;
2528 StgWord bitmap;
2529 StgClosure **p;
2530
2531 b = 0;
2532 bitmap = large_srt->l.bitmap[b];
2533 size = (nat)large_srt->l.size;
2534 p = (StgClosure **)large_srt->srt;
2535 for (i = 0; i < size; ) {
2536 if ((bitmap & 1) != 0) {
2537 evacuate(*p);
2538 }
2539 i++;
2540 p++;
2541 if (i % BITS_IN(W_) == 0) {
2542 b++;
2543 bitmap = large_srt->l.bitmap[b];
2544 } else {
2545 bitmap = bitmap >> 1;
2546 }
2547 }
2548 }
2549
2550 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
2551 * srt field in the info table. That's ok, because we'll
2552 * never dereference it.
2553 */
2554 STATIC_INLINE void
2555 scavenge_srt (StgClosure **srt, nat srt_bitmap)
2556 {
2557 nat bitmap;
2558 StgClosure **p;
2559
2560 bitmap = srt_bitmap;
2561 p = srt;
2562
2563 if (bitmap == (StgHalfWord)(-1)) {
2564 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
2565 return;
2566 }
2567
2568 while (bitmap != 0) {
2569 if ((bitmap & 1) != 0) {
2570 #ifdef ENABLE_WIN32_DLL_SUPPORT
2571 // Special-case to handle references to closures hiding out in DLLs, since
2572 	  // double indirections are required to get at them.  The code generator knows
2573 // which is which when generating the SRT, so it stores the (indirect)
2574 // reference to the DLL closure in the table by first adding one to it.
2575 // We check for this here, and undo the addition before evacuating it.
2576 //
2577 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
2578 // closure that's fixed at link-time, and no extra magic is required.
2579 if ( (unsigned long)(*srt) & 0x1 ) {
2580 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
2581 } else {
2582 evacuate(*p);
2583 }
2584 #else
2585 evacuate(*p);
2586 #endif
2587 }
2588 p++;
2589 bitmap = bitmap >> 1;
2590 }
2591 }
2592
2593
2594 STATIC_INLINE void
2595 scavenge_thunk_srt(const StgInfoTable *info)
2596 {
2597 StgThunkInfoTable *thunk_info;
2598
2599 if (!major_gc) return;
2600
2601 thunk_info = itbl_to_thunk_itbl(info);
2602 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
2603 }
2604
2605 STATIC_INLINE void
2606 scavenge_fun_srt(const StgInfoTable *info)
2607 {
2608 StgFunInfoTable *fun_info;
2609
2610 if (!major_gc) return;
2611
2612 fun_info = itbl_to_fun_itbl(info);
2613 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
2614 }
2615
2616 /* -----------------------------------------------------------------------------
2617 Scavenge a TSO.
2618 -------------------------------------------------------------------------- */
2619
2620 static void
2621 scavengeTSO (StgTSO *tso)
2622 {
2623 if ( tso->why_blocked == BlockedOnMVar
2624 || tso->why_blocked == BlockedOnBlackHole
2625 || tso->why_blocked == BlockedOnException
2626 #if defined(PAR)
2627 || tso->why_blocked == BlockedOnGA
2628 || tso->why_blocked == BlockedOnGA_NoSend
2629 #endif
2630 ) {
2631 tso->block_info.closure = evacuate(tso->block_info.closure);
2632 }
2633 if ( tso->blocked_exceptions != NULL ) {
2634 tso->blocked_exceptions =
2635 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
2636 }
2637
2638 // We don't always chase the link field: TSOs on the blackhole
2639 // queue are not automatically alive, so the link field is a
2640 // "weak" pointer in that case.
2641 if (tso->why_blocked != BlockedOnBlackHole) {
2642 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
2643 }
2644
2645      // scavenge the current transaction record
2646 tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
2647
2648 // scavenge this thread's stack
2649 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2650 }
2651
2652 /* -----------------------------------------------------------------------------
2653 Blocks of function args occur on the stack (at the top) and
2654 in PAPs.
2655 -------------------------------------------------------------------------- */
2656
2657 STATIC_INLINE StgPtr
2658 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
2659 {
2660 StgPtr p;
2661 StgWord bitmap;
2662 nat size;
2663
2664 p = (StgPtr)args;
2665 switch (fun_info->f.fun_type) {
2666 case ARG_GEN:
2667 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
2668 size = BITMAP_SIZE(fun_info->f.b.bitmap);
2669 goto small_bitmap;
2670 case ARG_GEN_BIG:
2671 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
2672 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
2673 p += size;
2674 break;
2675 default:
2676 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
2677 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
2678 small_bitmap:
2679 while (size > 0) {
2680 if ((bitmap & 1) == 0) {
2681 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2682 }
2683 p++;
2684 bitmap = bitmap >> 1;
2685 size--;
2686 }
2687 break;
2688 }
2689 return p;
2690 }
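/* Illustration only: in the small-bitmap walks above and below, a 0
 * bit means "this word is a pointer, evacuate it" and a 1 bit means
 * "non-pointer, skip it".  For example, with size = 3 and
 * bitmap = 0x2 (binary 010), the sketch below evacuates args[0] and
 * args[2] and leaves args[1] untouched.
 */
#if 0
static void
walk_small_bitmap_sketch (StgClosure **args, StgWord bitmap, nat size)
{
    nat i;
    for (i = 0; i < size; i++) {
        if ((bitmap & 1) == 0) {
            args[i] = evacuate(args[i]);
        }
        bitmap = bitmap >> 1;
    }
}
#endif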
2691
2692 STATIC_INLINE StgPtr
2693 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
2694 {
2695 StgPtr p;
2696 StgWord bitmap;
2697 StgFunInfoTable *fun_info;
2698
2699 fun_info = get_fun_itbl(fun);
2700 ASSERT(fun_info->i.type != PAP);
2701 p = (StgPtr)payload;
2702
2703 switch (fun_info->f.fun_type) {
2704 case ARG_GEN:
2705 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
2706 goto small_bitmap;
2707 case ARG_GEN_BIG:
2708 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
2709 p += size;
2710 break;
2711 case ARG_BCO:
2712 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
2713 p += size;
2714 break;
2715 default:
2716 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
2717 small_bitmap:
2718 while (size > 0) {
2719 if ((bitmap & 1) == 0) {
2720 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2721 }
2722 p++;
2723 bitmap = bitmap >> 1;
2724 size--;
2725 }
2726 break;
2727 }
2728 return p;
2729 }
2730
2731 STATIC_INLINE StgPtr
2732 scavenge_PAP (StgPAP *pap)
2733 {
2734 pap->fun = evacuate(pap->fun);
2735 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
2736 }
2737
2738 STATIC_INLINE StgPtr
2739 scavenge_AP (StgAP *ap)
2740 {
2741 ap->fun = evacuate(ap->fun);
2742 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
2743 }
2744
2745 /* -----------------------------------------------------------------------------
2746 Scavenge a given step until there are no more objects in this step
2747 to scavenge.
2748
2749 evac_gen is set by the caller to be either zero (for a step in a
2750 generation < N) or G where G is the generation of the step being
2751 scavenged.
2752
2753 We sometimes temporarily change evac_gen back to zero if we're
2754 scavenging a mutable object where early promotion isn't such a good
2755 idea.
2756 -------------------------------------------------------------------------- */
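/* Sketch of the calling convention described above; the real driver
 * lives elsewhere in this file.  A step in a generation younger than N
 * is scavenged with evac_gen = 0, while an older step is scavenged
 * with evac_gen set to its own generation.  Illustrative only.
 */
#if 0
static void
scavenge_step_sketch (step *stp)
{
    if (stp->gen_no < N) {
        evac_gen = 0;
    } else {
        evac_gen = stp->gen_no;
    }
    scavenge(stp);
}
#endif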
2757
2758 static void
2759 scavenge(step *stp)
2760 {
2761 StgPtr p, q;
2762 StgInfoTable *info;
2763 bdescr *bd;
2764 nat saved_evac_gen = evac_gen;
2765
2766 p = stp->scan;
2767 bd = stp->scan_bd;
2768
2769 failed_to_evac = rtsFalse;
2770
2771 /* scavenge phase - standard breadth-first scavenging of the
2772 * evacuated objects
2773 */
2774
2775 while (bd != stp->hp_bd || p < stp->hp) {
2776
2777 // If we're at the end of this block, move on to the next block
2778 if (bd != stp->hp_bd && p == bd->free) {
2779 bd = bd->link;
2780 p = bd->start;
2781 continue;
2782 }
2783
2784 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
2785 info = get_itbl((StgClosure *)p);
2786
2787 ASSERT(thunk_selector_depth == 0);
2788
2789 q = p;
2790 switch (info->type) {
2791
2792 case MVAR:
2793 {
2794 StgMVar *mvar = ((StgMVar *)p);
2795 evac_gen = 0;
2796 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
2797 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
2798 mvar->value = evacuate((StgClosure *)mvar->value);
2799 evac_gen = saved_evac_gen;
2800 failed_to_evac = rtsTrue; // mutable.
2801 p += sizeofW(StgMVar);
2802 break;
2803 }
2804
2805 case FUN_2_0:
2806 scavenge_fun_srt(info);
2807 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2808 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2809 p += sizeofW(StgHeader) + 2;
2810 break;
2811
2812 case THUNK_2_0:
2813 scavenge_thunk_srt(info);
2814 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
2815 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2816 p += sizeofW(StgThunk) + 2;
2817 break;
2818
2819 case CONSTR_2_0:
2820 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2821 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2822 p += sizeofW(StgHeader) + 2;
2823 break;
2824
2825 case THUNK_1_0:
2826 scavenge_thunk_srt(info);
2827 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2828 p += sizeofW(StgThunk) + 1;
2829 break;
2830
2831 case FUN_1_0:
2832 scavenge_fun_srt(info);
2833 case CONSTR_1_0:
2834 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2835 p += sizeofW(StgHeader) + 1;
2836 break;
2837
2838 case THUNK_0_1:
2839 scavenge_thunk_srt(info);
2840 p += sizeofW(StgThunk) + 1;
2841 break;
2842
2843 case FUN_0_1:
2844 scavenge_fun_srt(info);
2845 case CONSTR_0_1:
2846 p += sizeofW(StgHeader) + 1;
2847 break;
2848
2849 case THUNK_0_2:
2850 scavenge_thunk_srt(info);
2851 p += sizeofW(StgThunk) + 2;
2852 break;
2853
2854 case FUN_0_2:
2855 scavenge_fun_srt(info);
2856 case CONSTR_0_2:
2857 p += sizeofW(StgHeader) + 2;
2858 break;
2859
2860 case THUNK_1_1:
2861 scavenge_thunk_srt(info);
2862 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2863 p += sizeofW(StgThunk) + 2;
2864 break;
2865
2866 case FUN_1_1:
2867 scavenge_fun_srt(info);
2868 case CONSTR_1_1:
2869 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2870 p += sizeofW(StgHeader) + 2;
2871 break;
2872
2873 case FUN:
2874 scavenge_fun_srt(info);
2875 goto gen_obj;
2876
2877 case THUNK:
2878 {
2879 StgPtr end;
2880
2881 scavenge_thunk_srt(info);
2882 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
2883 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
2884 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2885 }
2886 p += info->layout.payload.nptrs;
2887 break;
2888 }
2889
2890 gen_obj:
2891 case CONSTR:
2892 case WEAK:
2893 case STABLE_NAME:
2894 {
2895 StgPtr end;
2896
2897 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2898 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2899 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2900 }
2901 p += info->layout.payload.nptrs;
2902 break;
2903 }
2904
2905 case BCO: {
2906 StgBCO *bco = (StgBCO *)p;
2907 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
2908 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
2909 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
2910 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
2911 p += bco_sizeW(bco);
2912 break;
2913 }
2914
2915 case IND_PERM:
2916 if (stp->gen->no != 0) {
2917 #ifdef PROFILING
2918 // @LDV profiling
2919 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
2920 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
2921 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
2922 #endif
2923 //
2924 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
2925 //
2926 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2927
2928 // We pretend that p has just been created.
2929 LDV_RECORD_CREATE((StgClosure *)p);
2930 }
2931 // fall through
2932 case IND_OLDGEN_PERM:
2933 ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
2934 p += sizeofW(StgInd);
2935 break;
2936
2937 case MUT_VAR_CLEAN:
2938 case MUT_VAR_DIRTY: {
2939 rtsBool saved_eager_promotion = eager_promotion;
2940
2941 eager_promotion = rtsFalse;
2942 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2943 eager_promotion = saved_eager_promotion;
2944
2945 if (failed_to_evac) {
2946 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
2947 } else {
2948 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
2949 }
2950 p += sizeofW(StgMutVar);
2951 break;
2952 }
2953
2954 case CAF_BLACKHOLE:
2955 case SE_CAF_BLACKHOLE:
2956 case SE_BLACKHOLE:
2957 case BLACKHOLE:
2958 p += BLACKHOLE_sizeW();
2959 break;
2960
2961 case THUNK_SELECTOR:
2962 {
2963 StgSelector *s = (StgSelector *)p;
2964 s->selectee = evacuate(s->selectee);
2965 p += THUNK_SELECTOR_sizeW();
2966 break;
2967 }
2968
2969 // A chunk of stack saved in a heap object
2970 case AP_STACK:
2971 {
2972 StgAP_STACK *ap = (StgAP_STACK *)p;
2973
2974 ap->fun = evacuate(ap->fun);
2975 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
2976 p = (StgPtr)ap->payload + ap->size;
2977 break;
2978 }
2979
2980 case PAP:
2981 p = scavenge_PAP((StgPAP *)p);
2982 break;
2983
2984 case AP:
2985 p = scavenge_AP((StgAP *)p);
2986 break;
2987
2988 case ARR_WORDS:
2989 // nothing to follow
2990 p += arr_words_sizeW((StgArrWords *)p);
2991 break;
2992
2993 case MUT_ARR_PTRS_CLEAN:
2994 case MUT_ARR_PTRS_DIRTY:
2995 // follow everything
2996 {
2997 StgPtr next;
2998 rtsBool saved_eager;
2999
3000 // We don't eagerly promote objects pointed to by a mutable
3001 // array, but if we find the array only points to objects in
3002 // the same or an older generation, we mark it "clean" and
3003 // avoid traversing it during minor GCs.
3004 saved_eager = eager_promotion;
3005 eager_promotion = rtsFalse;
3006 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3007 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3008 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3009 }
3010 eager_promotion = saved_eager;
3011
3012 if (failed_to_evac) {
3013 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
3014 } else {
3015 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
3016 }
3017
3018 failed_to_evac = rtsTrue; // always put it on the mutable list.
3019 break;
3020 }
3021
3022 case MUT_ARR_PTRS_FROZEN:
3023 case MUT_ARR_PTRS_FROZEN0:
3024 // follow everything
3025 {
3026 StgPtr next;
3027
3028 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3029 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3030 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3031 }
3032
3033 // If we're going to put this object on the mutable list, then
3034 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3035 if (failed_to_evac) {
3036 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3037 } else {
3038 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3039 }
3040 break;
3041 }
3042
3043 case TSO:
3044 {
3045 StgTSO *tso = (StgTSO *)p;
3046 rtsBool saved_eager = eager_promotion;
3047
3048 eager_promotion = rtsFalse;
3049 scavengeTSO(tso);
3050 eager_promotion = saved_eager;
3051
3052 if (failed_to_evac) {
3053 tso->flags |= TSO_DIRTY;
3054 } else {
3055 tso->flags &= ~TSO_DIRTY;
3056 }
3057
3058 failed_to_evac = rtsTrue; // always on the mutable list
3059 p += tso_sizeW(tso);
3060 break;
3061 }
3062
3063 #if defined(PAR)
3064 case RBH:
3065 {
3066 #if 0
3067 nat size, ptrs, nonptrs, vhs;
3068 char str[80];
3069 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3070 #endif
3071 StgRBH *rbh = (StgRBH *)p;
3072 (StgClosure *)rbh->blocking_queue =
3073 evacuate((StgClosure *)rbh->blocking_queue);
3074 failed_to_evac = rtsTrue; // mutable anyhow.
3075 IF_DEBUG(gc,
3076 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3077 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3078 // ToDo: use size of reverted closure here!
3079 p += BLACKHOLE_sizeW();
3080 break;
3081 }
3082
3083 case BLOCKED_FETCH:
3084 {
3085 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3086 // follow the pointer to the node which is being demanded
3087 (StgClosure *)bf->node =
3088 evacuate((StgClosure *)bf->node);
3089 // follow the link to the rest of the blocking queue
3090 (StgClosure *)bf->link =
3091 evacuate((StgClosure *)bf->link);
3092 IF_DEBUG(gc,
3093 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
3094 bf, info_type((StgClosure *)bf),
3095 bf->node, info_type(bf->node)));
3096 p += sizeofW(StgBlockedFetch);
3097 break;
3098 }
3099
3100 #ifdef DIST
3101 case REMOTE_REF:
3102 #endif
3103 case FETCH_ME:
3104 p += sizeofW(StgFetchMe);
3105 break; // nothing to do in this case
3106
3107 case FETCH_ME_BQ:
3108 {
3109 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3110 (StgClosure *)fmbq->blocking_queue =
3111 evacuate((StgClosure *)fmbq->blocking_queue);
3112 IF_DEBUG(gc,
3113 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
3114 p, info_type((StgClosure *)p)));
3115 p += sizeofW(StgFetchMeBlockingQueue);
3116 break;
3117 }
3118 #endif
3119
3120 case TVAR_WAIT_QUEUE:
3121 {
3122 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3123 evac_gen = 0;
3124 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3125 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3126 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3127 evac_gen = saved_evac_gen;
3128 failed_to_evac = rtsTrue; // mutable
3129 p += sizeofW(StgTVarWaitQueue);
3130 break;
3131 }
3132
3133 case TVAR:
3134 {
3135 StgTVar *tvar = ((StgTVar *) p);
3136 evac_gen = 0;
3137 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3138 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3139 evac_gen = saved_evac_gen;
3140 failed_to_evac = rtsTrue; // mutable
3141 p += sizeofW(StgTVar);
3142 break;
3143 }
3144
3145 case TREC_HEADER:
3146 {
3147 StgTRecHeader *trec = ((StgTRecHeader *) p);
3148 evac_gen = 0;
3149 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3150 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3151 evac_gen = saved_evac_gen;
3152 failed_to_evac = rtsTrue; // mutable
3153 p += sizeofW(StgTRecHeader);
3154 break;
3155 }
3156
3157 case TREC_CHUNK:
3158 {
3159 StgWord i;
3160 StgTRecChunk *tc = ((StgTRecChunk *) p);
3161 TRecEntry *e = &(tc -> entries[0]);
3162 evac_gen = 0;
3163 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3164 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3165 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3166 e->expected_value = evacuate((StgClosure*)e->expected_value);
3167 e->new_value = evacuate((StgClosure*)e->new_value);
3168 }
3169 evac_gen = saved_evac_gen;
3170 failed_to_evac = rtsTrue; // mutable
3171 p += sizeofW(StgTRecChunk);
3172 break;
3173 }
3174
3175 default:
3176 barf("scavenge: unimplemented/strange closure type %d @ %p",
3177 info->type, p);
3178 }
3179
3180 /*
3181 * We need to record the current object on the mutable list if
3182 * (a) It is actually mutable, or
3183 * (b) It contains pointers to a younger generation.
3184 * Case (b) arises if we didn't manage to promote everything that
3185 * the current object points to into the current generation.
3186 */
3187 if (failed_to_evac) {
3188 failed_to_evac = rtsFalse;
3189 if (stp->gen_no > 0) {
3190 recordMutableGen((StgClosure *)q, stp->gen);
3191 }
3192 }
3193 }
3194
3195 stp->scan_bd = bd;
3196 stp->scan = p;
3197 }
3198
3199 /* -----------------------------------------------------------------------------
3200 Scavenge everything on the mark stack.
3201
3202 This is slightly different from scavenge():
3203 - we don't walk linearly through the objects, so the scavenger
3204 doesn't need to advance the pointer on to the next object.
3205 -------------------------------------------------------------------------- */
3206
3207 static void
3208 scavenge_mark_stack(void)
3209 {
3210 StgPtr p, q;
3211 StgInfoTable *info;
3212 nat saved_evac_gen;
3213
3214 evac_gen = oldest_gen->no;
3215 saved_evac_gen = evac_gen;
3216
3217 linear_scan:
3218 while (!mark_stack_empty()) {
3219 p = pop_mark_stack();
3220
3221 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3222 info = get_itbl((StgClosure *)p);
3223
3224 q = p;
3225 switch (info->type) {
3226
3227 case MVAR:
3228 {
3229 StgMVar *mvar = ((StgMVar *)p);
3230 evac_gen = 0;
3231 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
3232 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
3233 mvar->value = evacuate((StgClosure *)mvar->value);
3234 evac_gen = saved_evac_gen;
3235 failed_to_evac = rtsTrue; // mutable.
3236 break;
3237 }
3238
3239 case FUN_2_0:
3240 scavenge_fun_srt(info);
3241 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
3242 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3243 break;
3244
3245 case THUNK_2_0:
3246 scavenge_thunk_srt(info);
3247 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
3248 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
3249 break;
3250
3251 case CONSTR_2_0:
3252 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
3253 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3254 break;
3255
3256 case FUN_1_0:
3257 case FUN_1_1:
3258 scavenge_fun_srt(info);
3259 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3260 break;
3261
3262 case THUNK_1_0:
3263 case THUNK_1_1:
3264 scavenge_thunk_srt(info);
3265 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
3266 break;
3267
3268 case CONSTR_1_0:
3269 case CONSTR_1_1:
3270 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3271 break;
3272
3273 case FUN_0_1:
3274 case FUN_0_2:
3275 scavenge_fun_srt(info);
3276 break;
3277
3278 case THUNK_0_1:
3279 case THUNK_0_2:
3280 scavenge_thunk_srt(info);
3281 break;
3282
3283 case CONSTR_0_1:
3284 case CONSTR_0_2:
3285 break;
3286
3287 case FUN:
3288 scavenge_fun_srt(info);
3289 goto gen_obj;
3290
3291 case THUNK:
3292 {
3293 StgPtr end;
3294
3295 scavenge_thunk_srt(info);
3296 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
3297 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
3298 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3299 }
3300 break;
3301 }
3302
3303 gen_obj:
3304 case CONSTR:
3305 case WEAK:
3306 case STABLE_NAME:
3307 {
3308 StgPtr end;
3309
3310 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
3311 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
3312 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3313 }
3314 break;
3315 }
3316
3317 case BCO: {
3318 StgBCO *bco = (StgBCO *)p;
3319 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
3320 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
3321 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
3322 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
3323 break;
3324 }
3325
3326 case IND_PERM:
3327 // don't need to do anything here: the only possible case
3328 // is that we're in a 1-space compacting collector, with
3329 // no "old" generation.
3330 break;
3331
3332 case IND_OLDGEN:
3333 case IND_OLDGEN_PERM:
3334 ((StgInd *)p)->indirectee =
3335 evacuate(((StgInd *)p)->indirectee);
3336 break;
3337
3338 case MUT_VAR_CLEAN:
3339 case MUT_VAR_DIRTY: {
3340 rtsBool saved_eager_promotion = eager_promotion;
3341
3342 eager_promotion = rtsFalse;
3343 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3344 eager_promotion = saved_eager_promotion;
3345
3346 if (failed_to_evac) {
3347 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
3348 } else {
3349 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
3350 }
3351 break;
3352 }
3353
3354 case CAF_BLACKHOLE:
3355 case SE_CAF_BLACKHOLE:
3356 case SE_BLACKHOLE:
3357 case BLACKHOLE:
3358 case ARR_WORDS:
3359 break;
3360
3361 case THUNK_SELECTOR:
3362 {
3363 StgSelector *s = (StgSelector *)p;
3364 s->selectee = evacuate(s->selectee);
3365 break;
3366 }
3367
3368 // A chunk of stack saved in a heap object
3369 case AP_STACK:
3370 {
3371 StgAP_STACK *ap = (StgAP_STACK *)p;
3372
3373 ap->fun = evacuate(ap->fun);
3374 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
3375 break;
3376 }
3377
3378 case PAP:
3379 scavenge_PAP((StgPAP *)p);
3380 break;
3381
3382 case AP:
3383 scavenge_AP((StgAP *)p);
3384 break;
3385
3386 case MUT_ARR_PTRS_CLEAN:
3387 case MUT_ARR_PTRS_DIRTY:
3388 // follow everything
3389 {
3390 StgPtr next;
3391 rtsBool saved_eager;
3392
3393 // We don't eagerly promote objects pointed to by a mutable
3394 // array, but if we find the array only points to objects in
3395 // the same or an older generation, we mark it "clean" and
3396 // avoid traversing it during minor GCs.
3397 saved_eager = eager_promotion;
3398 eager_promotion = rtsFalse;
3399 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3400 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3401 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3402 }
3403 eager_promotion = saved_eager;
3404
3405 if (failed_to_evac) {
3406 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
3407 } else {
3408 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
3409 }
3410
3411 failed_to_evac = rtsTrue; // mutable anyhow.
3412 break;
3413 }
3414
3415 case MUT_ARR_PTRS_FROZEN:
3416 case MUT_ARR_PTRS_FROZEN0:
3417 // follow everything
3418 {
3419 StgPtr next, q = p;
3420
3421 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3422 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3423 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3424 }
3425
3426 // If we're going to put this object on the mutable list, then
3427 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3428 if (failed_to_evac) {
3429 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3430 } else {
3431 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3432 }
3433 break;
3434 }
3435
3436 case TSO:
3437 {
3438 StgTSO *tso = (StgTSO *)p;
3439 rtsBool saved_eager = eager_promotion;
3440
3441 eager_promotion = rtsFalse;
3442 scavengeTSO(tso);
3443 eager_promotion = saved_eager;
3444
3445 if (failed_to_evac) {
3446 tso->flags |= TSO_DIRTY;
3447 } else {
3448 tso->flags &= ~TSO_DIRTY;
3449 }
3450
3451 failed_to_evac = rtsTrue; // always on the mutable list
3452 break;
3453 }
3454
3455 #if defined(PAR)
3456 case RBH:
3457 {
3458 #if 0
3459 nat size, ptrs, nonptrs, vhs;
3460 char str[80];
3461 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3462 #endif
3463 StgRBH *rbh = (StgRBH *)p;
3464 	rbh->blocking_queue = 
3465 	  (StgTSO *)evacuate((StgClosure *)rbh->blocking_queue);
3466 failed_to_evac = rtsTrue; // mutable anyhow.
3467 IF_DEBUG(gc,
3468 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3469 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3470 break;
3471 }
3472
3473 case BLOCKED_FETCH:
3474 {
3475 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3476 // follow the pointer to the node which is being demanded
3477 (StgClosure *)bf->node =
3478 evacuate((StgClosure *)bf->node);
3479 // follow the link to the rest of the blocking queue
3480 (StgClosure *)bf->link =
3481 evacuate((StgClosure *)bf->link);
3482 IF_DEBUG(gc,
3483 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
3484 bf, info_type((StgClosure *)bf),
3485 bf->node, info_type(bf->node)));
3486 break;
3487 }
3488
3489 #ifdef DIST
3490 case REMOTE_REF:
3491 #endif
3492 case FETCH_ME:
3493 break; // nothing to do in this case
3494
3495 case FETCH_ME_BQ:
3496 {
3497 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3498 (StgClosure *)fmbq->blocking_queue =
3499 evacuate((StgClosure *)fmbq->blocking_queue);
3500 IF_DEBUG(gc,
3501 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
3502 p, info_type((StgClosure *)p)));
3503 break;
3504 }
3505 #endif /* PAR */
3506
3507 case TVAR_WAIT_QUEUE:
3508 {
3509 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3510 evac_gen = 0;
3511 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3512 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3513 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3514 evac_gen = saved_evac_gen;
3515 failed_to_evac = rtsTrue; // mutable
3516 break;
3517 }
3518
3519 case TVAR:
3520 {
3521 StgTVar *tvar = ((StgTVar *) p);
3522 evac_gen = 0;
3523 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3524 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3525 evac_gen = saved_evac_gen;
3526 failed_to_evac = rtsTrue; // mutable
3527 break;
3528 }
3529
3530 case TREC_CHUNK:
3531 {
3532 StgWord i;
3533 StgTRecChunk *tc = ((StgTRecChunk *) p);
3534 TRecEntry *e = &(tc -> entries[0]);
3535 evac_gen = 0;
3536 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3537 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3538 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3539 e->expected_value = evacuate((StgClosure*)e->expected_value);
3540 e->new_value = evacuate((StgClosure*)e->new_value);
3541 }
3542 evac_gen = saved_evac_gen;
3543 failed_to_evac = rtsTrue; // mutable
3544 break;
3545 }
3546
3547 case TREC_HEADER:
3548 {
3549 StgTRecHeader *trec = ((StgTRecHeader *) p);
3550 evac_gen = 0;
3551 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3552 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3553 evac_gen = saved_evac_gen;
3554 failed_to_evac = rtsTrue; // mutable
3555 break;
3556 }
3557
3558 default:
3559 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
3560 info->type, p);
3561 }
3562
3563 if (failed_to_evac) {
3564 failed_to_evac = rtsFalse;
3565 if (evac_gen > 0) {
3566 recordMutableGen((StgClosure *)q, &generations[evac_gen]);
3567 }
3568 }
3569
3570 // mark the next bit to indicate "scavenged"
3571 mark(q+1, Bdescr(q));
3572
3573 } // while (!mark_stack_empty())
3574
3575 // start a new linear scan if the mark stack overflowed at some point
3576 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
3577 IF_DEBUG(gc, debugBelch("scavenge_mark_stack: starting linear scan"));
3578 mark_stack_overflowed = rtsFalse;
3579 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
3580 oldgen_scan = oldgen_scan_bd->start;
3581 }
3582
3583 if (oldgen_scan_bd) {
3584 // push a new thing on the mark stack
3585 loop:
3586 // find a closure that is marked but not scavenged, and start
3587 // from there.
3588 while (oldgen_scan < oldgen_scan_bd->free
3589 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
3590 oldgen_scan++;
3591 }
3592
3593 if (oldgen_scan < oldgen_scan_bd->free) {
3594
3595 // already scavenged?
3596 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
3597 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
3598 goto loop;
3599 }
3600 push_mark_stack(oldgen_scan);
3601 // ToDo: bump the linear scan by the actual size of the object
3602 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
3603 goto linear_scan;
3604 }
3605
3606 oldgen_scan_bd = oldgen_scan_bd->link;
3607 if (oldgen_scan_bd != NULL) {
3608 oldgen_scan = oldgen_scan_bd->start;
3609 goto loop;
3610 }
3611 }
3612 }
3613
3614 /* -----------------------------------------------------------------------------
3615 Scavenge one object.
3616
3617 This is used for objects that are temporarily marked as mutable
3618 because they contain old-to-new generation pointers. Only certain
3619 objects can have this property.
3620 -------------------------------------------------------------------------- */
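/* Illustrative sketch only: objects recorded with recordMutableGen()
 * end up on a per-generation mutable list, and each entry is later
 * revisited individually with scavenge_one().  The traversal below
 * assumes the mutable list is a chain of blocks of closure pointers
 * (gen->mut_list); the real traversal lives elsewhere in this file and
 * also acts on scavenge_one()'s result, which is ignored here.
 */
#if 0
static void
scavenge_mut_list_sketch (generation *gen)
{
    bdescr *bd;
    StgPtr q;

    evac_gen = gen->no;
    for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            // each entry points at an object that may contain
            // old-to-new pointers; re-scavenge it in place
            scavenge_one((StgPtr)*q);
        }
    }
}
#endif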
3621
3622 static rtsBool
3623 scavenge_one(StgPtr p)
3624 {
3625 const StgInfoTable *info;
3626 nat saved_evac_gen = evac_gen;
3627 rtsBool no_luck;
3628
3629 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3630 info = get_itbl((StgClosure *)p);
3631
3632 switch (info->type) {
3633
3634 case MVAR:
3635 {
3636 StgMVar *mvar = ((StgMVar *)p);
3637 evac_gen = 0;
3638 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
3639 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
3640 mvar->value = evacuate((StgClosure *)mvar->value);
3641 evac_gen = saved_evac_gen;
3642 failed_to_evac = rtsTrue; // mutable.
3643 break;
3644 }
3645
3646 case THUNK:
3647 case THUNK_1_0:
3648 case THUNK_0_1:
3649 case THUNK_1_1:
3650 case THUNK_0_2:
3651 case THUNK_2_0:
3652 {
3653 StgPtr q, end;
3654
3655 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
3656 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
3657 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
3658 }
3659 break;
3660 }
3661
3662 case FUN:
3663 case FUN_1_0: // hardly worth specialising these guys
3664 case FUN_0_1:
3665 case FUN_1_1:
3666 case FUN_0_2:
3667 case FUN_2_0:
3668 case CONSTR:
3669 case CONSTR_1_0:
3670 case CONSTR_0_1:
3671 case CONSTR_1_1:
3672 case CONSTR_0_2:
3673 case CONSTR_2_0:
3674 case WEAK:
3675 case IND_PERM:
3676 {
3677 StgPtr q, end;
3678
3679 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
3680 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
3681 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
3682 }
3683 break;
3684 }
3685
3686 case MUT_VAR_CLEAN:
3687 case MUT_VAR_DIRTY: {
3688 StgPtr q = p;
3689 rtsBool saved_eager_promotion = eager_promotion;
3690
3691 eager_promotion = rtsFalse;
3692 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3693 eager_promotion = saved_eager_promotion;
3694
3695 if (failed_to_evac) {
3696 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
3697 } else {
3698 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
3699 }
3700 break;
3701 }
3702
3703 case CAF_BLACKHOLE:
3704 case SE_CAF_BLACKHOLE:
3705 case SE_BLACKHOLE:
3706 case BLACKHOLE:
3707 break;
3708
3709 case THUNK_SELECTOR:
3710 {
3711 StgSelector *s = (StgSelector *)p;
3712 s->selectee = evacuate(s->selectee);
3713 break;
3714 }
3715
3716 case AP_STACK:
3717 {
3718 StgAP_STACK *ap = (StgAP_STACK *)p;
3719
3720 ap->fun = evacuate(ap->fun);
3721 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
3722 p = (StgPtr)ap->payload + ap->size;
3723 break;
3724 }
3725
3726 case PAP:
3727 p = scavenge_PAP((StgPAP *)p);
3728 break;
3729
3730 case AP:
3731 p = scavenge_AP((StgAP *)p);
3732 break;
3733
3734 case ARR_WORDS:
3735 // nothing to follow
3736 break;
3737
3738 case MUT_ARR_PTRS_CLEAN:
3739 case MUT_ARR_PTRS_DIRTY:
3740 {
3741 StgPtr next, q;
3742 rtsBool saved_eager;
3743
3744 // We don't eagerly promote objects pointed to by a mutable
3745 // array, but if we find the array only points to objects in
3746 // the same or an older generation, we mark it "clean" and
3747 // avoid traversing it during minor GCs.
3748 saved_eager = eager_promotion;
3749 eager_promotion = rtsFalse;
3750 q = p;
3751 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3752 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3753 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3754 }
3755 eager_promotion = saved_eager;
3756
3757 if (failed_to_evac) {
3758 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
3759 } else {
3760 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
3761 }
3762
3763 failed_to_evac = rtsTrue;
3764 break;
3765 }
3766
3767 case MUT_ARR_PTRS_FROZEN:
3768 case MUT_ARR_PTRS_FROZEN0:
3769 {
3770 // follow everything
3771 StgPtr next, q=p;
3772
3773 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3774 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3775 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3776 }
3777
3778 // If we're going to put this object on the mutable list, then
3779 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3780 if (failed_to_evac) {
3781 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3782 } else {
3783 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3784 }
3785 break;
3786 }
3787
3788 case TSO:
3789 {
3790 StgTSO *tso = (StgTSO *)p;
3791 rtsBool saved_eager = eager_promotion;
3792
3793 eager_promotion = rtsFalse;
3794 scavengeTSO(tso);
3795 eager_promotion = saved_eager;
3796
3797 if (failed_to_evac) {
3798 tso->flags |= TSO_DIRTY;
3799 } else {
3800 tso->flags &= ~TSO_DIRTY;
3801 }
3802
3803 failed_to_evac = rtsTrue; // always on the mutable list
3804 break;
3805 }
3806
3807 #if defined(PAR)
3808 case RBH:
3809 {
3810 #if 0
3811 nat size, ptrs, nonptrs, vhs;
3812 char str[80];
3813 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3814 #endif
3815 StgRBH *rbh = (StgRBH *)p;
3816 (StgClosure *)rbh->blocking_queue =
3817 evacuate((StgClosure *)rbh->blocking_queue);
3818 failed_to_evac = rtsTrue; // mutable anyhow.
3819 IF_DEBUG(gc,
3820 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3821 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3822 // ToDo: use size of reverted closure here!
3823 break;
3824 }
3825
3826 case BLOCKED_FETCH:
3827 {
3828 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3829 // follow the pointer to the node which is being demanded
3830 (StgClosure *)bf->node =
3831 evacuate((StgClosure *)bf->node);
3832 // follow the link to the rest of the blocking queue
3833 (StgClosure *)bf->link =
3834 evacuate((StgClosure *)bf->link);
3835 IF_DEBUG(gc,
3836 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
3837 bf, info_type((StgClosure *)bf),
3838 bf->node, info_type(bf->node)));
3839 break;
3840 }
3841
3842 #ifdef DIST
3843 case REMOTE_REF:
3844 #endif
3845 case FETCH_ME:
3846 break; // nothing to do in this case
3847
3848 case FETCH_ME_BQ:
3849 {
3850 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3851 (StgClosure *)fmbq->blocking_queue =
3852 evacuate((StgClosure *)fmbq->blocking_queue);
3853 IF_DEBUG(gc,
3854 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
3855 p, info_type((StgClosure *)p)));
3856 break;
3857 }
3858 #endif
3859
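// The STM closures below are always treated as mutable: their fields
// are evacuated with evac_gen = 0, and failed_to_evac is set so that
// they remain on the mutable list.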
3860 case TVAR_WAIT_QUEUE:
3861 {
3862 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3863 evac_gen = 0;
3864 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3865 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3866 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3867 evac_gen = saved_evac_gen;
3868 failed_to_evac = rtsTrue; // mutable
3869 break;
3870 }
3871
3872 case TVAR:
3873 {
3874 StgTVar *tvar = ((StgTVar *) p);
3875 evac_gen = 0;
3876 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3877 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3878 evac_gen = saved_evac_gen;
3879 failed_to_evac = rtsTrue; // mutable
3880 break;
3881 }
3882
3883 case TREC_HEADER:
3884 {
3885 StgTRecHeader *trec = ((StgTRecHeader *) p);
3886 evac_gen = 0;
3887 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3888 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3889 evac_gen = saved_evac_gen;
3890 failed_to_evac = rtsTrue; // mutable
3891 break;
3892 }
3893
3894 case TREC_CHUNK:
3895 {
3896 StgWord i;
3897 StgTRecChunk *tc = ((StgTRecChunk *) p);
3898 TRecEntry *e = &(tc -> entries[0]);
3899 evac_gen = 0;
3900 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3901 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3902 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3903 e->expected_value = evacuate((StgClosure*)e->expected_value);
3904 e->new_value = evacuate((StgClosure*)e->new_value);
3905 }
3906 evac_gen = saved_evac_gen;
3907 failed_to_evac = rtsTrue; // mutable
3908 break;
3909 }
3910
3911 case IND_OLDGEN:
3912 case IND_OLDGEN_PERM:
3913 case IND_STATIC:
3914 {
3915 /* Careful here: a THUNK can be on the mutable list because
3916 * it contains pointers to young gen objects.  If such a thunk
3917 * is updated, it becomes an IND_OLDGEN that is put back on the
3918 * mutable list, so we would scavenge it twice.  evacuate()
3919 * doesn't check whether the object has already been
3920 * evacuated, so we perform that check here.
3921 */
3922 StgClosure *q = ((StgInd *)p)->indirectee;
3923 if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
3924 break;
3925 }
3926 ((StgInd *)p)->indirectee = evacuate(q);
3927 }
3928
3929 #if 0 && defined(DEBUG)
3930 if (RtsFlags.DebugFlags.gc)
3931 /* Debugging code to print out the size of the thing we just
3932 * promoted
3933 */
3934 {
3935 StgPtr start = gen->steps[0].scan;
3936 bdescr *start_bd = gen->steps[0].scan_bd;
3937 nat size = 0;
3938 scavenge(&gen->steps[0]);
3939 if (start_bd != gen->steps[0].scan_bd) {
3940 size += (P_)BLOCK_ROUND_UP(start) - start;
3941 start_bd = start_bd->link;
3942 while (start_bd != gen->steps[0].scan_bd) {
3943 size += BLOCK_SIZE_W;
3944 start_bd = start_bd->link;
3945 }
3946 size += gen->steps[0].scan -
3947 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
3948 } else {
3949 size = gen->steps[0].scan - start;
3950 }
3951 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
3952 }
3953 #endif
3954 break;
3955
3956 default:
3957 barf("scavenge_one: strange object %d", (int)(info->type));
3958 }
3959
3960 no_luck = failed_to_evac;
3961 failed_to_evac = rtsFalse;
3962 return (no_luck);
3963 }
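
/* Usage sketch (illustrative only, mirroring the mutable-list walk
 * below): a caller hands scavenge_one() a pointer to an object and
 * keeps the object on a mutable list iff the result is rtsTrue:
 *
 *   if (scavenge_one(p)) {
 *       recordMutableGen((StgClosure *)p, gen);  // still points to young gen
 *   }
 */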
3964
3965 /* -----------------------------------------------------------------------------
3966 Scavenging mutable lists.
3967
3968   We treat the mutable list of each generation > N (i.e. every generation
3969   older than the oldest generation being collected) as roots.  We also
3970   remove non-mutable objects from the mutable list at this point.
3971 -------------------------------------------------------------------------- */
3972
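/* Illustrative sketch only: the collector proper is expected to snapshot
 * each older generation's mut_list into saved_mut_list and then call
 * scavenge_mutable_list() once per uncollected generation, roughly as
 * below.  The exact loop is an assumption, not the real call site.
 */
#if 0
    {
        nat g;
        for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
            scavenge_mutable_list(&generations[g]);
        }
    }
#endif
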
3973 static void
3974 scavenge_mutable_list(generation *gen)
3975 {
3976 bdescr *bd;
3977 StgPtr p, q;
3978
3979 bd = gen->saved_mut_list;
3980
3981 evac_gen = gen->no;
3982 for (; bd != NULL; bd = bd->link) {
3983 for (q = bd->start; q < bd->free; q++) {
3984 p = (StgPtr)*q;
3985 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3986
3987 #ifdef DEBUG
3988 switch (get_itbl((StgClosure *)p)->type) {
3989 case MUT_VAR_CLEAN:
3990 barf("MUT_VAR_CLEAN on mutable list");
3991 case MUT_VAR_DIRTY:
3992 mutlist_MUTVARS++; break;
3993 case MUT_ARR_PTRS_CLEAN:
3994 case MUT_ARR_PTRS_DIRTY:
3995 case MUT_ARR_PTRS_FROZEN:
3996 case MUT_ARR_PTRS_FROZEN0:
3997 mutlist_MUTARRS++; break;
3998 default:
3999 mutlist_OTHERS++; break;
4000 }
4001 #endif
4002
4003 // Check whether this object is "clean", that is, whether it
4004 // definitely doesn't point into a younger generation.
4005 // Clean objects don't need to be scavenged. Some clean
4006 // objects (MUT_VAR_CLEAN) are not kept on the mutable
4007 // list at all; others, such as MUT_ARR_PTRS_CLEAN and
4008 // TSO, are always on the mutable list.
4009 //
4010 switch (get_itbl((StgClosure *)p)->type) {
4011 case MUT_ARR_PTRS_CLEAN:
4012 recordMutableGen((StgClosure *)p,gen);
4013 continue;
4014 case TSO: {
4015 StgTSO *tso = (StgTSO *)p;
4016 if ((tso->flags & TSO_DIRTY) == 0) {
4017 // A clean TSO: we don't have to traverse its
4018 // stack. However, we *do* follow the link field:
4019 // we don't want to have to mark a TSO dirty just
4020 // because we put it on a different queue.
4021 if (tso->why_blocked != BlockedOnBlackHole) {
4022 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
4023 }
4024 recordMutableGen((StgClosure *)p,gen);
4025 continue;
4026 }
4027 }
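// A dirty TSO falls through to the scavenge_one() call below and
// is then put back on the mutable list.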
4028 default:
4029 ;
4030 }
4031
4032 if (scavenge_one(p)) {
4033 // didn't manage to promote everything, so put the
4034 // object back on the list.
4035 recordMutableGen((StgClosure *)p,gen);
4036 }
4037 }
4038 }
4039
4040 // free the old mut_list
4041 freeChain(gen->saved_mut_list);
4042 gen->saved_mut_list = NULL;
4043 }
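
/* Sketch of the mutator-side write barrier that the DEBUG check above
 * relies on: a MUT_VAR starts out clean after a GC and is only put on
 * its generation's mutable list at the moment it is first written,
 * flipping CLEAN -> DIRTY.  The helper below is a hypothetical
 * illustration, not the actual RTS entry point.
 */
#if 0
static void example_dirty_mut_var (StgMutVar *mv)
{
    if (mv->header.info == &stg_MUT_VAR_CLEAN_info) {
        mv->header.info = &stg_MUT_VAR_DIRTY_info;
        // record it in the mutable list of the generation it lives in
        recordMutableGen((StgClosure *)mv,
                         &generations[Bdescr((StgPtr)mv)->gen_no]);
    }
}
#endif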
4044
4045
4046 static void
4047 scavenge_static(void)
4048 {
4049 StgClosure* p = static_objects;
4050 const StgInfoTable *info;
4051
4052 /* Always evacuate straight to the oldest generation for static
4053 * objects */
4054 evac_gen = oldest_gen->no;
4055
4056 /* keep going until we've scavenged all the objects on the linked
4057 list... */
4058 while (p != END_OF_STATIC_LIST) {
4059
4060 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
4061 info = get_itbl(p);
4062 /*
4063 if (info->type==RBH)
4064 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
4065 */
4066 // make sure the info pointer is into text space
4067
4068 /* Take this object *off* the static_objects list,
4069 * and put it on the scavenged_static_objects list.
4070 */
4071 static_objects = *STATIC_LINK(info,p);
4072 *STATIC_LINK(info,p) = scavenged_static_objects;
4073 scavenged_static_objects = p;
4074
4075 switch (info -> type) {
4076
4077 case IND_STATIC:
4078 {
4079 StgInd *ind = (StgInd *)p;
4080 ind->indirectee = evacuate(ind->indirectee);
4081
4082 /* We might fail to evacuate the indirectee, in which case this
4083 * object has to go back on the mutable list of the oldest
4084 * generation.  We leave it *on* the scavenged_static_objects
4085 * list, though, in case we visit this object again.
4086 */
4087 if (failed_to_evac) {
4088 failed_to_evac = rtsFalse;
4089 recordMutableGen((StgClosure *)p,oldest_gen);
4090 }
4091 break;
4092 }
4093
4094 case THUNK_STATIC:
4095 scavenge_thunk_srt(info);
4096 break;
4097
4098 case FUN_STATIC:
4099 scavenge_fun_srt(info);
4100 break;
4101
4102 case CONSTR_STATIC:
4103 {
4104 StgPtr q, next;
4105
4106 next = (P_)p->payload + info->layout.payload.ptrs;
4107 // evacuate the pointers
4108 for (q = (P_)p->payload; q < next; q++) {
4109 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
4110 }
4111 break;
4112 }
4113