rts/GC.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2003
4 *
5 * Generational garbage collector
6 *
7 * ---------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11 #include "RtsFlags.h"
12 #include "RtsUtils.h"
13 #include "Apply.h"
14 #include "OSThreads.h"
15 #include "Storage.h"
16 #include "LdvProfile.h"
17 #include "Updates.h"
18 #include "Stats.h"
19 #include "Schedule.h"
20 #include "Sanity.h"
21 #include "BlockAlloc.h"
22 #include "MBlock.h"
23 #include "ProfHeap.h"
24 #include "SchedAPI.h"
25 #include "Weak.h"
26 #include "Prelude.h"
27 #include "ParTicky.h" // ToDo: move into Rts.h
28 #include "GCCompact.h"
29 #include "RtsSignals.h"
30 #include "STM.h"
31 #if defined(GRAN) || defined(PAR)
32 # include "GranSimRts.h"
33 # include "ParallelRts.h"
34 # include "FetchMe.h"
35 # if defined(DEBUG)
36 # include "Printer.h"
37 # include "ParallelDebug.h"
38 # endif
39 #endif
40 #include "HsFFI.h"
41 #include "Linker.h"
42 #if defined(RTS_GTK_FRONTPANEL)
43 #include "FrontPanel.h"
44 #endif
45 #include "Trace.h"
46 #include "RetainerProfile.h"
47
48 #include <string.h>
49
50 // Turn off inlining when debugging - it obfuscates things
51 #ifdef DEBUG
52 # undef STATIC_INLINE
53 # define STATIC_INLINE static
54 #endif
55
56 /* STATIC OBJECT LIST.
57 *
58 * During GC:
59 * We maintain a linked list of static objects that are still live.
60 * The requirements for this list are:
61 *
62 * - we need to scan the list while adding to it, in order to
63 * scavenge all the static objects (in the same way that
64 * breadth-first scavenging works for dynamic objects).
65 *
66 * - we need to be able to tell whether an object is already on
67 * the list, to break loops.
68 *
69 * Each static object has a "static link field", which we use for
70 * linking objects on to the list. We use a stack-type list, consing
71 * objects on the front as they are added (this means that the
72 * scavenge phase is depth-first, not breadth-first, but that
73 * shouldn't matter).
74 *
75 * A separate list is kept for objects that have been scavenged
76 * already - this is so that we can zero all the marks afterwards.
77 *
78 * An object is on the list if its static link field is non-zero; this
79 * means that we have to mark the end of the list with '1', not NULL.
80 *
81 * Extra notes for generational GC:
82 *
83 * Each generation has a static object list associated with it. When
84 * collecting generations up to N, we treat the static object lists
85 * from generations > N as roots.
86 *
87 * We build up a static object list while collecting generations 0..N,
88 * which is then appended to the static object list of generation N+1.
89 */
90 static StgClosure* static_objects; // live static objects
91 StgClosure* scavenged_static_objects; // static objects scavenged so far
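/* For illustration, a static closure typically gets onto static_objects
 * during evacuation roughly like this (see the THUNK_STATIC, FUN_STATIC
 * and IND_STATIC cases in evacuate() below):
 *
 *     if (*THUNK_STATIC_LINK(q) == NULL) {       // not on a list yet
 *         *THUNK_STATIC_LINK(q) = static_objects;
 *         static_objects = q;                    // cons onto the front
 *     }
 *
 * END_OF_STATIC_LIST is a non-NULL sentinel (the '1' mentioned above),
 * so a NULL static link field always means "not on either list".
 */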
92
93 /* N is the oldest generation being collected, where the generations
94 * are numbered starting at 0. A major GC (indicated by the major_gc
95 * flag) is when we're collecting all generations. We only attempt to
96 * deal with static objects and GC CAFs when doing a major GC.
97 */
98 static nat N;
99 static rtsBool major_gc;
100
101 /* Youngest generation that objects should be evacuated to in
102 * evacuate(). (Logically an argument to evacuate, but it's static
103 * a lot of the time so we optimise it into a global variable).
104 */
105 static nat evac_gen;
106
107 /* Whether to do eager promotion or not.
108 */
109 static rtsBool eager_promotion;
110
111 /* Weak pointers
112 */
113 StgWeak *old_weak_ptr_list; // also pending finaliser list
114
115 /* Which stage of processing various kinds of weak pointer are we at?
116 * (see traverse_weak_ptr_list() below for discussion).
117 */
118 typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
119 static WeakStage weak_stage;
120
121 /* List of all threads during GC
122 */
123 static StgTSO *old_all_threads;
124 StgTSO *resurrected_threads;
125
126 /* Flag indicating failure to evacuate an object to the desired
127 * generation.
128 */
129 static rtsBool failed_to_evac;
130
131 /* Saved nursery (used for 2-space collector only)
132 */
133 static bdescr *saved_nursery;
134 static nat saved_n_blocks;
135
136 /* Data used for allocation area sizing.
137 */
138 static lnat new_blocks; // blocks allocated during this GC
139 static lnat new_scavd_blocks; // ditto, but depth-first blocks
140 static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
141
142 /* Used to avoid long recursion due to selector thunks
143 */
144 static lnat thunk_selector_depth = 0;
145 #define MAX_THUNK_SELECTOR_DEPTH 8
146
147 /* Mut-list stats */
148 #ifdef DEBUG
149 static nat
150 mutlist_MUTVARS,
151 mutlist_MUTARRS,
152 mutlist_OTHERS;
153 #endif
154
155 /* -----------------------------------------------------------------------------
156 Static function declarations
157 -------------------------------------------------------------------------- */
158
159 static bdescr * gc_alloc_block ( step *stp );
160 static void mark_root ( StgClosure **root );
161
162 // Use a register argument for evacuate, if available.
163 #if __GNUC__ >= 2
164 #define REGPARM1 __attribute__((regparm(1)))
165 #else
166 #define REGPARM1
167 #endif
168
169 REGPARM1 static StgClosure * evacuate (StgClosure *q);
170
171 static void zero_static_object_list ( StgClosure* first_static );
172
173 static rtsBool traverse_weak_ptr_list ( void );
174 static void mark_weak_ptr_list ( StgWeak **list );
175 static rtsBool traverse_blackhole_queue ( void );
176
177 static StgClosure * eval_thunk_selector ( nat field, StgSelector * p );
178
179
180 static void scavenge ( step * );
181 static void scavenge_mark_stack ( void );
182 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
183 static rtsBool scavenge_one ( StgPtr p );
184 static void scavenge_large ( step * );
185 static void scavenge_static ( void );
186 static void scavenge_mutable_list ( generation *g );
187
188 static void scavenge_large_bitmap ( StgPtr p,
189 StgLargeBitmap *large_bitmap,
190 nat size );
191
192 #if 0 && defined(DEBUG)
193 static void gcCAFs ( void );
194 #endif
195
196 /* -----------------------------------------------------------------------------
197 inline functions etc. for dealing with the mark bitmap & stack.
198 -------------------------------------------------------------------------- */
199
200 #define MARK_STACK_BLOCKS 4
201
202 static bdescr *mark_stack_bdescr;
203 static StgPtr *mark_stack;
204 static StgPtr *mark_sp;
205 static StgPtr *mark_splim;
206
207 // Flag and pointers used for falling back to a linear scan when the
208 // mark stack overflows.
209 static rtsBool mark_stack_overflowed;
210 static bdescr *oldgen_scan_bd;
211 static StgPtr oldgen_scan;
212
213 STATIC_INLINE rtsBool
214 mark_stack_empty(void)
215 {
216 return mark_sp == mark_stack;
217 }
218
219 STATIC_INLINE rtsBool
220 mark_stack_full(void)
221 {
222 return mark_sp >= mark_splim;
223 }
224
225 STATIC_INLINE void
226 reset_mark_stack(void)
227 {
228 mark_sp = mark_stack;
229 }
230
231 STATIC_INLINE void
232 push_mark_stack(StgPtr p)
233 {
234 *mark_sp++ = p;
235 }
236
237 STATIC_INLINE StgPtr
238 pop_mark_stack(void)
239 {
240 return *--mark_sp;
241 }
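/* Typical use of these helpers when evacuating into a compacted step,
 * roughly (the real code is in evacuate() below):
 *
 *     if (!is_marked((P_)q,bd)) {
 *         mark((P_)q,bd);
 *         if (mark_stack_full()) {
 *             mark_stack_overflowed = rtsTrue;  // fall back to a linear
 *             reset_mark_stack();               // scan of the old gen
 *         }
 *         push_mark_stack((P_)q);
 *     }
 *
 * scavenge_mark_stack() pops entries until the stack is empty, and
 * reverts to scanning blocks linearly (oldgen_scan_bd) after an overflow.
 */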
242
243 /* -----------------------------------------------------------------------------
244 Allocate a new to-space block in the given step.
245 -------------------------------------------------------------------------- */
246
247 static bdescr *
248 gc_alloc_block(step *stp)
249 {
250 bdescr *bd = allocBlock();
251 bd->gen_no = stp->gen_no;
252 bd->step = stp;
253 bd->link = NULL;
254
255 // blocks in to-space in generations up to and including N
256 // get the BF_EVACUATED flag.
257 if (stp->gen_no <= N) {
258 bd->flags = BF_EVACUATED;
259 } else {
260 bd->flags = 0;
261 }
262
263 // Start a new to-space block, chain it on after the previous one.
264 if (stp->hp_bd != NULL) {
265 stp->hp_bd->free = stp->hp;
266 stp->hp_bd->link = bd;
267 }
268
269 stp->hp_bd = bd;
270 stp->hp = bd->start;
271 stp->hpLim = stp->hp + BLOCK_SIZE_W;
272
273 stp->n_blocks++;
274 new_blocks++;
275
276 return bd;
277 }
278
279 static bdescr *
280 gc_alloc_scavd_block(step *stp)
281 {
282 bdescr *bd = allocBlock();
283 bd->gen_no = stp->gen_no;
284 bd->step = stp;
285
286 // blocks in to-space in generations up to and including N
287 // get the BF_EVACUATED flag.
288 if (stp->gen_no <= N) {
289 bd->flags = BF_EVACUATED;
290 } else {
291 bd->flags = 0;
292 }
293
294 bd->link = stp->blocks;
295 stp->blocks = bd;
296
297 if (stp->scavd_hp != NULL) {
298 Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
299 }
300 stp->scavd_hp = bd->start;
301 stp->scavd_hpLim = stp->scavd_hp + BLOCK_SIZE_W;
302
303 stp->n_blocks++;
304 new_scavd_blocks++;
305
306 return bd;
307 }
308
309 /* -----------------------------------------------------------------------------
310 GarbageCollect
311
312 Rough outline of the algorithm: for garbage collecting generation N
313 (and all younger generations):
314
315 - follow all pointers in the root set. the root set includes all
316 mutable objects in all generations (mutable_list).
317
318 - for each pointer, evacuate the object it points to into either
319
320 + to-space of the step given by step->to, which is the next
321 highest step in this generation or the first step in the next
322 generation if this is the last step.
323
324 + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
325 When we evacuate an object we attempt to evacuate
326 everything it points to into the same generation - this is
327 achieved by setting evac_gen to the desired generation. If
328 we can't do this, then an entry in the mut list has to
329 be made for the cross-generation pointer.
330
331 + if the object is already in a generation > N, then leave
332 it alone.
333
334 - repeatedly scavenge to-space from each step in each generation
335 being collected until no more objects can be evacuated.
336
337 - free from-space in each step, and set from-space = to-space.
338
339 Locks held: all capabilities are held throughout GarbageCollect().
340
341 -------------------------------------------------------------------------- */
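/* Schematically, the body of GarbageCollect() below runs as follows
 * (a simplified sketch of the control flow, not the exact code):
 *
 *     initialise to-space (and mark bitmaps) for generations 0..N;
 *     scavenge the mutable lists of generations > N;        // roots
 *     evacuate CAFs, the application's roots, stable pointers;
 *     loop:
 *         scavenge static objects, the mark stack and every step;
 *         if anything was evacuated, or traverse_blackhole_queue()
 *         or traverse_weak_ptr_list() woke something up, goto loop;
 *     compact the oldest generation, if it is being compacted;
 *     tidy up to-space, resize the generations and the nursery;
 *     schedule finalizers and resurrect dead threads.
 */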
342
343 void
344 GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
345 {
346 bdescr *bd;
347 step *stp;
348 lnat live, allocated, copied = 0, scavd_copied = 0;
349 lnat oldgen_saved_blocks = 0;
350 nat g, s, i;
351
352 ACQUIRE_SM_LOCK;
353
354 #ifdef PROFILING
355 CostCentreStack *prev_CCS;
356 #endif
357
358 debugTrace(DEBUG_gc, "starting GC");
359
360 #if defined(RTS_USER_SIGNALS)
361 // block signals
362 blockUserSignals();
363 #endif
364
365 // tell the STM to discard any cached closures it's hoping to re-use
366 stmPreGCHook();
367
368 // tell the stats department that we've started a GC
369 stat_startGC();
370
371 #ifdef DEBUG
372 // check for memory leaks if DEBUG is on
373 memInventory();
374 #endif
375
376 #ifdef DEBUG
377 mutlist_MUTVARS = 0;
378 mutlist_MUTARRS = 0;
379 mutlist_OTHERS = 0;
380 #endif
381
382 // Init stats and print par specific (timing) info
383 PAR_TICKY_PAR_START();
384
385 // attribute any costs to CCS_GC
386 #ifdef PROFILING
387 prev_CCS = CCCS;
388 CCCS = CCS_GC;
389 #endif
390
391 /* Approximate how much we allocated.
392 * Todo: only when generating stats?
393 */
394 allocated = calcAllocated();
395
396 /* Figure out which generation to collect
397 */
398 if (force_major_gc) {
399 N = RtsFlags.GcFlags.generations - 1;
400 major_gc = rtsTrue;
401 } else {
402 N = 0;
403 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
404 if (generations[g].steps[0].n_blocks +
405 generations[g].steps[0].n_large_blocks
406 >= generations[g].max_blocks) {
407 N = g;
408 }
409 }
410 major_gc = (N == RtsFlags.GcFlags.generations-1);
411 }
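  // For example, with 3 generations: if generation 1 has grown beyond its
  // max_blocks but generation 2 has not, the loop above leaves N == 1, so
  // we collect generations 0 and 1 only and major_gc is rtsFalse.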
412
413 #ifdef RTS_GTK_FRONTPANEL
414 if (RtsFlags.GcFlags.frontpanel) {
415 updateFrontPanelBeforeGC(N);
416 }
417 #endif
418
419 // check stack sanity *before* GC (ToDo: check all threads)
420 #if defined(GRAN)
421 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
422 #endif
423 IF_DEBUG(sanity, checkFreeListSanity());
424
425 /* Initialise the static object lists
426 */
427 static_objects = END_OF_STATIC_LIST;
428 scavenged_static_objects = END_OF_STATIC_LIST;
429
430 /* Save the nursery if we're doing a two-space collection.
431 * g0s0->blocks will be used for to-space, so we need to get the
432 * nursery out of the way.
433 */
434 if (RtsFlags.GcFlags.generations == 1) {
435 saved_nursery = g0s0->blocks;
436 saved_n_blocks = g0s0->n_blocks;
437 g0s0->blocks = NULL;
438 g0s0->n_blocks = 0;
439 }
440
441 /* Keep a count of how many new blocks we allocated during this GC
442 * (used for resizing the allocation area, later).
443 */
444 new_blocks = 0;
445 new_scavd_blocks = 0;
446
447 // Initialise to-space in all the generations/steps that we're
448 // collecting.
449 //
450 for (g = 0; g <= N; g++) {
451
452 // throw away the mutable list. Invariant: the mutable list
453 // always has at least one block; this means we can avoid a check for
454 // NULL in recordMutable().
455 if (g != 0) {
456 freeChain(generations[g].mut_list);
457 generations[g].mut_list = allocBlock();
458 for (i = 0; i < n_capabilities; i++) {
459 freeChain(capabilities[i].mut_lists[g]);
460 capabilities[i].mut_lists[g] = allocBlock();
461 }
462 }
463
464 for (s = 0; s < generations[g].n_steps; s++) {
465
466 // generation 0, step 0 doesn't need to-space
467 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
468 continue;
469 }
470
471 stp = &generations[g].steps[s];
472 ASSERT(stp->gen_no == g);
473
474 // start a new to-space for this step.
475 stp->old_blocks = stp->blocks;
476 stp->n_old_blocks = stp->n_blocks;
477
478 // allocate the first to-space block; extra blocks will be
479 // chained on as necessary.
480 stp->hp_bd = NULL;
481 bd = gc_alloc_block(stp);
482 stp->blocks = bd;
483 stp->n_blocks = 1;
484 stp->scan = bd->start;
485 stp->scan_bd = bd;
486
487 // allocate a block for "already scavenged" objects. This goes
488 // on the front of the stp->blocks list, so it won't be
489 // traversed by the scavenging sweep.
490 gc_alloc_scavd_block(stp);
491
492 // initialise the large object queues.
493 stp->new_large_objects = NULL;
494 stp->scavenged_large_objects = NULL;
495 stp->n_scavenged_large_blocks = 0;
496
497 // mark the large objects as not evacuated yet
498 for (bd = stp->large_objects; bd; bd = bd->link) {
499 bd->flags &= ~BF_EVACUATED;
500 }
501
502 // for a compacted step, we need to allocate the bitmap
503 if (stp->is_compacted) {
504 nat bitmap_size; // in bytes
505 bdescr *bitmap_bdescr;
506 StgWord *bitmap;
507
508 bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
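	  // One mark bit per word: a BLOCK_SIZE-byte block holds BLOCK_SIZE_W
	  // words, hence BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE) bytes of
	  // bitmap per block (e.g. 128 bytes per 4k block with 32-bit words).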
509
510 if (bitmap_size > 0) {
511 bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
512 / BLOCK_SIZE);
513 stp->bitmap = bitmap_bdescr;
514 bitmap = bitmap_bdescr->start;
515
516 debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
517 bitmap_size, bitmap);
518
519 // don't forget to fill it with zeros!
520 memset(bitmap, 0, bitmap_size);
521
522 // For each block in this step, point to its bitmap from the
523 // block descriptor.
524 for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
525 bd->u.bitmap = bitmap;
526 bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
527
528 // Also at this point we set the BF_COMPACTED flag
529 // for this block. The invariant is that
530 // BF_COMPACTED is always unset, except during GC
531 // when it is set on those blocks which will be
532 // compacted.
533 bd->flags |= BF_COMPACTED;
534 }
535 }
536 }
537 }
538 }
539
540 /* make sure the older generations have at least one block to
541 * allocate into (this makes things easier for copy(), see below).
542 */
543 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
544 for (s = 0; s < generations[g].n_steps; s++) {
545 stp = &generations[g].steps[s];
546 if (stp->hp_bd == NULL) {
547 ASSERT(stp->blocks == NULL);
548 bd = gc_alloc_block(stp);
549 stp->blocks = bd;
550 stp->n_blocks = 1;
551 }
552 if (stp->scavd_hp == NULL) {
553 gc_alloc_scavd_block(stp);
554 stp->n_blocks++;
555 }
556 /* Set the scan pointer for older generations: remember we
557 * still have to scavenge objects that have been promoted. */
558 stp->scan = stp->hp;
559 stp->scan_bd = stp->hp_bd;
560 stp->new_large_objects = NULL;
561 stp->scavenged_large_objects = NULL;
562 stp->n_scavenged_large_blocks = 0;
563 }
564
565 /* Move the private mutable lists from each capability onto the
566 * main mutable list for the generation.
567 */
568 for (i = 0; i < n_capabilities; i++) {
569 for (bd = capabilities[i].mut_lists[g];
570 bd->link != NULL; bd = bd->link) {
571 /* nothing */
572 }
573 bd->link = generations[g].mut_list;
574 generations[g].mut_list = capabilities[i].mut_lists[g];
575 capabilities[i].mut_lists[g] = allocBlock();
576 }
577 }
578
579 /* Allocate a mark stack if we're doing a major collection.
580 */
581 if (major_gc) {
582 mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
583 mark_stack = (StgPtr *)mark_stack_bdescr->start;
584 mark_sp = mark_stack;
585 mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
586 } else {
587 mark_stack_bdescr = NULL;
588 }
589
590 eager_promotion = rtsTrue; // for now
591
592 /* -----------------------------------------------------------------------
593 * follow all the roots that we know about:
594 * - mutable lists from each generation > N
595 * we want to *scavenge* these roots, not evacuate them: they're not
596 * going to move in this GC.
597 * Also: do them in reverse generation order. This is because we
598 * often want to promote objects that are pointed to by older
599 * generations early, so we don't have to repeatedly copy them.
600 * Doing the generations in reverse order ensures that we don't end
601 * up in the situation where we want to evac an object to gen 3 and
602 * it has already been evaced to gen 2.
603 */
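  /* For example, with N == 0 and a young object reachable from the mutable
   * lists of both generation 1 and generation 2: scavenging generation 2
   * first copies it straight into generation 2.  Doing generation 1 first
   * would copy it into generation 1, and generation 2 would then need a
   * fresh mutable-list entry for the resulting old-to-young pointer.
   */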
604 {
605 int st;
606 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
607 generations[g].saved_mut_list = generations[g].mut_list;
608 generations[g].mut_list = allocBlock();
609 // mut_list always has at least one block.
610 }
611
612 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
613 IF_PAR_DEBUG(verbose, printMutableList(&generations[g]));
614 scavenge_mutable_list(&generations[g]);
615 evac_gen = g;
616 for (st = generations[g].n_steps-1; st >= 0; st--) {
617 scavenge(&generations[g].steps[st]);
618 }
619 }
620 }
621
622 /* follow roots from the CAF list (used by GHCi)
623 */
624 evac_gen = 0;
625 markCAFs(mark_root);
626
627 /* follow all the roots that the application knows about.
628 */
629 evac_gen = 0;
630 get_roots(mark_root);
631
632 #if defined(PAR)
633 /* And don't forget to mark the TSO if we got here direct from
634 * Haskell! */
635 /* Not needed in a seq version?
636 if (CurrentTSO) {
637 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
638 }
639 */
640
641 // Mark the entries in the GALA table of the parallel system
642 markLocalGAs(major_gc);
643 // Mark all entries on the list of pending fetches
644 markPendingFetches(major_gc);
645 #endif
646
647 /* Mark the weak pointer list, and prepare to detect dead weak
648 * pointers.
649 */
650 mark_weak_ptr_list(&weak_ptr_list);
651 old_weak_ptr_list = weak_ptr_list;
652 weak_ptr_list = NULL;
653 weak_stage = WeakPtrs;
654
655 /* The all_threads list is like the weak_ptr_list.
656 * See traverse_weak_ptr_list() for the details.
657 */
658 old_all_threads = all_threads;
659 all_threads = END_TSO_QUEUE;
660 resurrected_threads = END_TSO_QUEUE;
661
662 /* Mark the stable pointer table.
663 */
664 markStablePtrTable(mark_root);
665
666 /* Mark the root pointer table.
667 */
668 markRootPtrTable(mark_root);
669
670 /* -------------------------------------------------------------------------
671 * Repeatedly scavenge all the areas we know about until there's no
672 * more scavenging to be done.
673 */
674 {
675 rtsBool flag;
676 loop:
677 flag = rtsFalse;
678
679 // scavenge static objects
680 if (major_gc && static_objects != END_OF_STATIC_LIST) {
681 IF_DEBUG(sanity, checkStaticObjects(static_objects));
682 scavenge_static();
683 }
684
685 /* When scavenging the older generations: Objects may have been
686 * evacuated from generations <= N into older generations, and we
687 * need to scavenge these objects. We're going to try to ensure that
688 * any evacuations that occur move the objects into at least the
689 * same generation as the object being scavenged, otherwise we
690 * have to create new entries on the mutable list for the older
691 * generation.
692 */
693
694 // scavenge each step in generations 0..maxgen
695 {
696 long gen;
697 int st;
698
699 loop2:
700 // scavenge objects in compacted generation
701 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
702 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
703 scavenge_mark_stack();
704 flag = rtsTrue;
705 }
706
707 for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
708 for (st = generations[gen].n_steps; --st >= 0; ) {
709 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
710 continue;
711 }
712 stp = &generations[gen].steps[st];
713 evac_gen = gen;
714 if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
715 scavenge(stp);
716 flag = rtsTrue;
717 goto loop2;
718 }
719 if (stp->new_large_objects != NULL) {
720 scavenge_large(stp);
721 flag = rtsTrue;
722 goto loop2;
723 }
724 }
725 }
726 }
727
728 // if any blackholes are alive, make the threads that wait on
729 // them alive too.
730 if (traverse_blackhole_queue())
731 flag = rtsTrue;
732
733 if (flag) { goto loop; }
734
735 // must be last... invariant is that everything is fully
736 // scavenged at this point.
737 if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
738 goto loop;
739 }
740 }
741
742 /* Update the pointers from the task list - these are
743 * treated as weak pointers because we want to allow a main thread
744 * to get a BlockedOnDeadMVar exception in the same way as any other
745 * thread. Note that the threads should all have been retained by
746 * GC by virtue of being on the all_threads list; we're just
747 * updating pointers here.
748 */
749 {
750 Task *task;
751 StgTSO *tso;
752 for (task = all_tasks; task != NULL; task = task->all_link) {
753 if (!task->stopped && task->tso) {
754 ASSERT(task->tso->bound == task);
755 tso = (StgTSO *) isAlive((StgClosure *)task->tso);
756 if (tso == NULL) {
757 barf("task %p: main thread %d has been GC'd",
758 #ifdef THREADED_RTS
759 (void *)task->id,
760 #else
761 (void *)task,
762 #endif
763 task->tso->id);
764 }
765 task->tso = tso;
766 }
767 }
768 }
769
770 #if defined(PAR)
771 // Reconstruct the Global Address tables used in GUM
772 rebuildGAtables(major_gc);
773 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
774 #endif
775
776 // Now see which stable names are still alive.
777 gcStablePtrTable();
778
779 // Tidy the end of the to-space chains
780 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
781 for (s = 0; s < generations[g].n_steps; s++) {
782 stp = &generations[g].steps[s];
783 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
784 ASSERT(Bdescr(stp->hp) == stp->hp_bd);
785 stp->hp_bd->free = stp->hp;
786 Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
787 }
788 }
789 }
790
791 #ifdef PROFILING
792 // We call processHeapClosureForDead() on every closure destroyed during
793 // the current garbage collection, so we invoke LdvCensusForDead().
794 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
795 || RtsFlags.ProfFlags.bioSelector != NULL)
796 LdvCensusForDead(N);
797 #endif
798
799 // NO MORE EVACUATION AFTER THIS POINT!
800 // Finally: compaction of the oldest generation.
801 if (major_gc && oldest_gen->steps[0].is_compacted) {
802 // save number of blocks for stats
803 oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
804 compact(get_roots);
805 }
806
807 IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
808
809 /* run through all the generations/steps and tidy up
810 */
811 copied = new_blocks * BLOCK_SIZE_W;
812 scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
813 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
814
815 if (g <= N) {
816 generations[g].collections++; // for stats
817 }
818
819 // Count the mutable list as bytes "copied" for the purposes of
820 // stats. Every mutable list is copied during every GC.
821 if (g > 0) {
822 nat mut_list_size = 0;
823 for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
824 mut_list_size += bd->free - bd->start;
825 }
826 copied += mut_list_size;
827
828 debugTrace(DEBUG_gc,
829 "mut_list_size: %ld (%d vars, %d arrays, %d others)",
830 mut_list_size * sizeof(W_),
831 mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS);
832 }
833
834 for (s = 0; s < generations[g].n_steps; s++) {
835 bdescr *next;
836 stp = &generations[g].steps[s];
837
838 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
839 // stats information: how much we copied
840 if (g <= N) {
841 copied -= stp->hp_bd->start + BLOCK_SIZE_W -
842 stp->hp_bd->free;
843 scavd_copied -= (P_)(BLOCK_ROUND_UP(stp->scavd_hp)) - stp->scavd_hp;
844 }
845 }
846
847 // for generations we collected...
848 if (g <= N) {
849
850 /* free old memory and shift to-space into from-space for all
851 * the collected steps (except the allocation area). These
852 * freed blocks will probably be quickly recycled.
853 */
854 if (!(g == 0 && s == 0)) {
855 if (stp->is_compacted) {
856 // for a compacted step, just shift the new to-space
857 // onto the front of the now-compacted existing blocks.
858 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
859 bd->flags &= ~BF_EVACUATED; // now from-space
860 }
861 // tack the new blocks on the end of the existing blocks
862 if (stp->old_blocks != NULL) {
863 for (bd = stp->old_blocks; bd != NULL; bd = next) {
864 // NB. this step might not be compacted next
865 // time, so reset the BF_COMPACTED flags.
866 // They are set before GC if we're going to
867 // compact. (search for BF_COMPACTED above).
868 bd->flags &= ~BF_COMPACTED;
869 next = bd->link;
870 if (next == NULL) {
871 bd->link = stp->blocks;
872 }
873 }
874 stp->blocks = stp->old_blocks;
875 }
876 // add the new blocks to the block tally
877 stp->n_blocks += stp->n_old_blocks;
878 ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
879 } else {
880 freeChain(stp->old_blocks);
881 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
882 bd->flags &= ~BF_EVACUATED; // now from-space
883 }
884 }
885 stp->old_blocks = NULL;
886 stp->n_old_blocks = 0;
887 }
888
889 /* LARGE OBJECTS. The current live large objects are chained on
890 * scavenged_large_objects, having been moved there during garbage
891 * collection from large_objects. Any objects left on the
892 * large_objects list are therefore dead, so we free them here.
893 */
894 for (bd = stp->large_objects; bd != NULL; bd = next) {
895 next = bd->link;
896 freeGroup(bd);
897 bd = next;
898 }
899
900 // update the count of blocks used by large objects
901 for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
902 bd->flags &= ~BF_EVACUATED;
903 }
904 stp->large_objects = stp->scavenged_large_objects;
905 stp->n_large_blocks = stp->n_scavenged_large_blocks;
906
907 } else {
908 // for older generations...
909
910 /* For older generations, we need to append the
911 * scavenged_large_object list (i.e. large objects that have been
912 * promoted during this GC) to the large_object list for that step.
913 */
914 for (bd = stp->scavenged_large_objects; bd; bd = next) {
915 next = bd->link;
916 bd->flags &= ~BF_EVACUATED;
917 dbl_link_onto(bd, &stp->large_objects);
918 }
919
920 // add the new blocks we promoted during this GC
921 stp->n_large_blocks += stp->n_scavenged_large_blocks;
922 }
923 }
924 }
925
926 /* Reset the sizes of the older generations when we do a major
927 * collection.
928 *
929 * CURRENT STRATEGY: make all generations except zero the same size.
930 * We have to stay within the maximum heap size, and leave a certain
931 * percentage of the maximum heap size available to allocate into.
932 */
933 if (major_gc && RtsFlags.GcFlags.generations > 1) {
934 nat live, size, min_alloc;
935 nat max = RtsFlags.GcFlags.maxHeapSize;
936 nat gens = RtsFlags.GcFlags.generations;
937
938 // live in the oldest generations
939 live = oldest_gen->steps[0].n_blocks +
940 oldest_gen->steps[0].n_large_blocks;
941
942 // default max size for all generations except zero
943 size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
944 RtsFlags.GcFlags.minOldGenSize);
945
946 // minimum size for generation zero
947 min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
948 RtsFlags.GcFlags.minAllocAreaSize);
949
950 // Auto-enable compaction when the residency reaches a
951 // certain percentage of the maximum heap size (default: 30%).
952 if (RtsFlags.GcFlags.generations > 1 &&
953 (RtsFlags.GcFlags.compact ||
954 (max > 0 &&
955 oldest_gen->steps[0].n_blocks >
956 (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
957 oldest_gen->steps[0].is_compacted = 1;
958 // debugBelch("compaction: on\n", live);
959 } else {
960 oldest_gen->steps[0].is_compacted = 0;
961 // debugBelch("compaction: off\n", live);
962 }
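  // For example (assuming the usual 4k blocks): with a 256Mb maximum heap,
  // max is 65536 blocks, so with the default 30% threshold compaction is
  // switched on once the oldest generation exceeds roughly 19660 blocks
  // (about 77Mb of residency).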
963
964 // if we're going to go over the maximum heap size, reduce the
965 // size of the generations accordingly. The calculation is
966 // different if compaction is turned on, because we don't need
967 // to double the space required to collect the old generation.
968 if (max != 0) {
969
970 // this test is necessary to ensure that the calculations
971 // below don't have any negative results - we're working
972 // with unsigned values here.
973 if (max < min_alloc) {
974 heapOverflow();
975 }
976
977 if (oldest_gen->steps[0].is_compacted) {
978 if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
979 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
980 }
981 } else {
982 if ( (size * (gens - 1) * 2) + min_alloc > max ) {
983 size = (max - min_alloc) / ((gens - 1) * 2);
984 }
985 }
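      // Roughly: an old generation collected by copying can need up to
      // twice its size during GC (from-space plus to-space), giving about
      // size * (gens-1) * 2 on top of the allocation area; a compacted
      // oldest generation is collected in place, so it only counts once,
      // hence the ((gens-1) * 2 - 1) divisor above.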
986
987 if (size < live) {
988 heapOverflow();
989 }
990 }
991
992 #if 0
993 debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
994 min_alloc, size, max);
995 #endif
996
997 for (g = 0; g < gens; g++) {
998 generations[g].max_blocks = size;
999 }
1000 }
1001
1002 // Guess the amount of live data for stats.
1003 live = calcLive();
1004
1005 /* Free the small objects allocated via allocate(), since this will
1006 * all have been copied into G0S1 now.
1007 */
1008 if (small_alloc_list != NULL) {
1009 freeChain(small_alloc_list);
1010 }
1011 small_alloc_list = NULL;
1012 alloc_blocks = 0;
1013 alloc_Hp = NULL;
1014 alloc_HpLim = NULL;
1015 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
1016
1017 // Start a new pinned_object_block
1018 pinned_object_block = NULL;
1019
1020 /* Free the mark stack.
1021 */
1022 if (mark_stack_bdescr != NULL) {
1023 freeGroup(mark_stack_bdescr);
1024 }
1025
1026 /* Free any bitmaps.
1027 */
1028 for (g = 0; g <= N; g++) {
1029 for (s = 0; s < generations[g].n_steps; s++) {
1030 stp = &generations[g].steps[s];
1031 if (stp->bitmap != NULL) {
1032 freeGroup(stp->bitmap);
1033 stp->bitmap = NULL;
1034 }
1035 }
1036 }
1037
1038 /* Two-space collector:
1039 * Free the old to-space, and estimate the amount of live data.
1040 */
1041 if (RtsFlags.GcFlags.generations == 1) {
1042 nat blocks;
1043
1044 if (g0s0->old_blocks != NULL) {
1045 freeChain(g0s0->old_blocks);
1046 }
1047 for (bd = g0s0->blocks; bd != NULL; bd = bd->link) {
1048 bd->flags = 0; // now from-space
1049 }
1050 g0s0->old_blocks = g0s0->blocks;
1051 g0s0->n_old_blocks = g0s0->n_blocks;
1052 g0s0->blocks = saved_nursery;
1053 g0s0->n_blocks = saved_n_blocks;
1054
1055 /* For a two-space collector, we need to resize the nursery. */
1056
1057 /* set up a new nursery. Allocate a nursery size based on a
1058 * function of the amount of live data (by default a factor of 2)
1059 * Use the blocks from the old nursery if possible, freeing up any
1060 * left over blocks.
1061 *
1062 * If we get near the maximum heap size, then adjust our nursery
1063 * size accordingly. If the nursery is the same size as the live
1064 * data (L), then we need 3L bytes. We can reduce the size of the
1065 * nursery to bring the required memory down near 2L bytes.
1066 *
1067 * A normal 2-space collector would need 4L bytes to give the same
1068 * performance we get from 3L bytes, reducing to the same
1069 * performance at 2L bytes.
1070 */
1071 blocks = g0s0->n_old_blocks;
1072
1073 if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
1074 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
1075 RtsFlags.GcFlags.maxHeapSize ) {
1076 long adjusted_blocks; // signed on purpose
1077 int pc_free;
1078
1079 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
1080
1081 debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
1082 RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);
1083
1084 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
1085 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
1086 heapOverflow();
1087 }
1088 blocks = adjusted_blocks;
1089
1090 } else {
1091 blocks *= RtsFlags.GcFlags.oldGenFactor;
1092 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
1093 blocks = RtsFlags.GcFlags.minAllocAreaSize;
1094 }
1095 }
1096 resizeNurseries(blocks);
1097
1098 } else {
1099 /* Generational collector:
1100 * If the user has given us a suggested heap size, adjust our
1101 * allocation area to make best use of the memory available.
1102 */
1103
1104 if (RtsFlags.GcFlags.heapSizeSuggestion) {
1105 long blocks;
1106 nat needed = calcNeeded(); // approx blocks needed at next GC
1107
1108 /* Guess how much will be live in generation 0 step 0 next time.
1109 * A good approximation is obtained by finding the
1110 * percentage of g0s0 that was live at the last minor GC.
1111 */
1112 if (N == 0) {
1113 g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
1114 }
1115
1116 /* Estimate a size for the allocation area based on the
1117 * information available. We might end up going slightly under
1118 * or over the suggested heap size, but we should be pretty
1119 * close on average.
1120 *
1121 * Formula: suggested - needed
1122 * ----------------------------
1123 * 1 + g0s0_pcnt_kept/100
1124 *
1125 * where 'needed' is the amount of memory needed at the next
1126 * collection for collecting all steps except g0s0.
1127 */
1128 blocks =
1129 (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
1130 (100 + (long)g0s0_pcnt_kept);
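	 /* For example: with heapSizeSuggestion = 4096 blocks, needed = 1024
	  * and g0s0_pcnt_kept = 30, blocks = (3072 * 100) / 130 = 2363.  The
	  * nursery (2363) plus its expected survivors (~30%, i.e. ~709) plus
	  * 'needed' (1024) then comes to roughly the suggested 4096 blocks.
	  */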
1131
1132 if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
1133 blocks = RtsFlags.GcFlags.minAllocAreaSize;
1134 }
1135
1136 resizeNurseries((nat)blocks);
1137
1138 } else {
1139 // we might have added extra large blocks to the nursery, so
1140 // resize back to minAllocAreaSize again.
1141 resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
1142 }
1143 }
1144
1145 // mark the garbage collected CAFs as dead
1146 #if 0 && defined(DEBUG) // doesn't work at the moment
1147 if (major_gc) { gcCAFs(); }
1148 #endif
1149
1150 #ifdef PROFILING
1151 // resetStaticObjectForRetainerProfiling() must be called before
1152 // zeroing below.
1153 resetStaticObjectForRetainerProfiling();
1154 #endif
1155
1156 // zero the scavenged static object list
1157 if (major_gc) {
1158 zero_static_object_list(scavenged_static_objects);
1159 }
1160
1161 // Reset the nursery
1162 resetNurseries();
1163
1164 // start any pending finalizers
1165 RELEASE_SM_LOCK;
1166 scheduleFinalizers(last_free_capability, old_weak_ptr_list);
1167 ACQUIRE_SM_LOCK;
1168
1169 // send exceptions to any threads which were about to die
1170 RELEASE_SM_LOCK;
1171 resurrectThreads(resurrected_threads);
1172 ACQUIRE_SM_LOCK;
1173
1174 // Update the stable pointer hash table.
1175 updateStablePtrTable(major_gc);
1176
1177 // check sanity after GC
1178 IF_DEBUG(sanity, checkSanity());
1179
1180 // extra GC trace info
1181 IF_DEBUG(gc, statDescribeGens());
1182
1183 #ifdef DEBUG
1184 // symbol-table based profiling
1185 /* heapCensus(to_blocks); */ /* ToDo */
1186 #endif
1187
1188 // restore enclosing cost centre
1189 #ifdef PROFILING
1190 CCCS = prev_CCS;
1191 #endif
1192
1193 #ifdef DEBUG
1194 // check for memory leaks if DEBUG is on
1195 memInventory();
1196 #endif
1197
1198 #ifdef RTS_GTK_FRONTPANEL
1199 if (RtsFlags.GcFlags.frontpanel) {
1200 updateFrontPanelAfterGC( N, live );
1201 }
1202 #endif
1203
1204 // ok, GC over: tell the stats department what happened.
1205 stat_endGC(allocated, live, copied, scavd_copied, N);
1206
1207 #if defined(RTS_USER_SIGNALS)
1208 // unblock signals again
1209 unblockUserSignals();
1210 #endif
1211
1212 RELEASE_SM_LOCK;
1213
1214 //PAR_TICKY_TP();
1215 }
1216
1217
1218 /* -----------------------------------------------------------------------------
1219 Weak Pointers
1220
1221 traverse_weak_ptr_list is called possibly many times during garbage
1222 collection. It returns a flag indicating whether it did any work
1223 (i.e. called evacuate on any live pointers).
1224
1225 Invariant: traverse_weak_ptr_list is called when the heap is in an
1226 idempotent state. That means that there are no pending
1227 evacuate/scavenge operations. This invariant helps the weak
1228 pointer code decide which weak pointers are dead - if there are no
1229 new live weak pointers, then all the currently unreachable ones are
1230 dead.
1231
1232 For generational GC: we just don't try to finalize weak pointers in
1233 older generations than the one we're collecting. This could
1234 probably be optimised by keeping per-generation lists of weak
1235 pointers, but for a few weak pointers this scheme will work.
1236
1237 There are three distinct stages to processing weak pointers:
1238
1239 - weak_stage == WeakPtrs
1240
1241 We process all the weak pointers whose keys are alive (evacuate
1242 their values and finalizers), and repeat until we can find no new
1243 live keys. If no live keys are found in this pass, then we
1244 evacuate the finalizers of all the dead weak pointers in order to
1245 run them.
1246
1247 - weak_stage == WeakThreads
1248
1249 Now, we discover which *threads* are still alive. Pointers to
1250 threads from the all_threads and main thread lists are the
1251 weakest of all: a pointer from the finalizer of a dead weak
1252 pointer can keep a thread alive. Any threads found to be unreachable
1253 are evacuated and placed on the resurrected_threads list so we
1254 can send them a signal later.
1255
1256 - weak_stage == WeakDone
1257
1258 No more evacuation is done.
1259
1260 -------------------------------------------------------------------------- */
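/* Note the interplay with the main loop in GarbageCollect(): we only call
 * traverse_weak_ptr_list() once everything else has been fully scavenged,
 * and whenever it returns rtsTrue (it evacuated something, or moved on to
 * the next stage) the caller does another complete round of scavenging
 * before calling it again.  Roughly:
 *
 *     loop:
 *         scavenge everything;
 *         if (traverse_weak_ptr_list()) goto loop;
 */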
1261
1262 static rtsBool
1263 traverse_weak_ptr_list(void)
1264 {
1265 StgWeak *w, **last_w, *next_w;
1266 StgClosure *new;
1267 rtsBool flag = rtsFalse;
1268
1269 switch (weak_stage) {
1270
1271 case WeakDone:
1272 return rtsFalse;
1273
1274 case WeakPtrs:
1275 /* doesn't matter where we evacuate values/finalizers to, since
1276 * these pointers are treated as roots (iff the keys are alive).
1277 */
1278 evac_gen = 0;
1279
1280 last_w = &old_weak_ptr_list;
1281 for (w = old_weak_ptr_list; w != NULL; w = next_w) {
1282
1283 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
1284 * called on a live weak pointer object. Just remove it.
1285 */
1286 if (w->header.info == &stg_DEAD_WEAK_info) {
1287 next_w = ((StgDeadWeak *)w)->link;
1288 *last_w = next_w;
1289 continue;
1290 }
1291
1292 switch (get_itbl(w)->type) {
1293
1294 case EVACUATED:
1295 next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
1296 *last_w = next_w;
1297 continue;
1298
1299 case WEAK:
1300 /* Now, check whether the key is reachable.
1301 */
1302 new = isAlive(w->key);
1303 if (new != NULL) {
1304 w->key = new;
1305 // evacuate the value and finalizer
1306 w->value = evacuate(w->value);
1307 w->finalizer = evacuate(w->finalizer);
1308 // remove this weak ptr from the old_weak_ptr list
1309 *last_w = w->link;
1310 // and put it on the new weak ptr list
1311 next_w = w->link;
1312 w->link = weak_ptr_list;
1313 weak_ptr_list = w;
1314 flag = rtsTrue;
1315
1316 debugTrace(DEBUG_weak,
1317 "weak pointer still alive at %p -> %p",
1318 w, w->key);
1319 continue;
1320 }
1321 else {
1322 last_w = &(w->link);
1323 next_w = w->link;
1324 continue;
1325 }
1326
1327 default:
1328 barf("traverse_weak_ptr_list: not WEAK");
1329 }
1330 }
1331
1332 /* If we didn't make any changes, then we can go round and kill all
1333 * the dead weak pointers. The old_weak_ptr list is used as a list
1334 * of pending finalizers later on.
1335 */
1336 if (flag == rtsFalse) {
1337 for (w = old_weak_ptr_list; w; w = w->link) {
1338 w->finalizer = evacuate(w->finalizer);
1339 }
1340
1341 // Next, move to the WeakThreads stage after fully
1342 // scavenging the finalizers we've just evacuated.
1343 weak_stage = WeakThreads;
1344 }
1345
1346 return rtsTrue;
1347
1348 case WeakThreads:
1349 /* Now deal with the all_threads list, which behaves somewhat like
1350 * the weak ptr list. If we discover any threads that are about to
1351 * become garbage, we wake them up and administer an exception.
1352 */
1353 {
1354 StgTSO *t, *tmp, *next, **prev;
1355
1356 prev = &old_all_threads;
1357 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1358
1359 tmp = (StgTSO *)isAlive((StgClosure *)t);
1360
1361 if (tmp != NULL) {
1362 t = tmp;
1363 }
1364
1365 ASSERT(get_itbl(t)->type == TSO);
1366 switch (t->what_next) {
1367 case ThreadRelocated:
1368 next = t->link;
1369 *prev = next;
1370 continue;
1371 case ThreadKilled:
1372 case ThreadComplete:
1373 // finished or died. The thread might still be alive, but we
1374 // don't keep it on the all_threads list. Don't forget to
1375 // stub out its global_link field.
1376 next = t->global_link;
1377 t->global_link = END_TSO_QUEUE;
1378 *prev = next;
1379 continue;
1380 default:
1381 ;
1382 }
1383
1384 if (tmp == NULL) {
1385 // not alive (yet): leave this thread on the
1386 // old_all_threads list.
1387 prev = &(t->global_link);
1388 next = t->global_link;
1389 }
1390 else {
1391 // alive: move this thread onto the all_threads list.
1392 next = t->global_link;
1393 t->global_link = all_threads;
1394 all_threads = t;
1395 *prev = next;
1396 }
1397 }
1398 }
1399
1400 /* If we evacuated any threads, we need to go back to the scavenger.
1401 */
1402 if (flag) return rtsTrue;
1403
1404 /* And resurrect any threads which were about to become garbage.
1405 */
1406 {
1407 StgTSO *t, *tmp, *next;
1408 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1409 next = t->global_link;
1410 tmp = (StgTSO *)evacuate((StgClosure *)t);
1411 tmp->global_link = resurrected_threads;
1412 resurrected_threads = tmp;
1413 }
1414 }
1415
1416 /* Finally, we can update the blackhole_queue. This queue
1417 * simply strings together TSOs blocked on black holes; it is
1418 * not intended to keep anything alive. Hence, we do not follow
1419 * pointers on the blackhole_queue until now, when we have
1420 * determined which TSOs are otherwise reachable. We know at
1421 * this point that all TSOs have been evacuated, however.
1422 */
1423 {
1424 StgTSO **pt;
1425 for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
1426 *pt = (StgTSO *)isAlive((StgClosure *)*pt);
1427 ASSERT(*pt != NULL);
1428 }
1429 }
1430
1431 weak_stage = WeakDone; // *now* we're done,
1432 return rtsTrue; // but one more round of scavenging, please
1433
1434 default:
1435 barf("traverse_weak_ptr_list");
1436 return rtsTrue;
1437 }
1438
1439 }
1440
1441 /* -----------------------------------------------------------------------------
1442 The blackhole queue
1443
1444 Threads on this list behave like weak pointers during the normal
1445 phase of garbage collection: if the blackhole is reachable, then
1446 the thread is reachable too.
1447 -------------------------------------------------------------------------- */
1448 static rtsBool
1449 traverse_blackhole_queue (void)
1450 {
1451 StgTSO *prev, *t, *tmp;
1452 rtsBool flag;
1453
1454 flag = rtsFalse;
1455 prev = NULL;
1456
1457 for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->link) {
1458 if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
1459 if (isAlive(t->block_info.closure)) {
1460 t = (StgTSO *)evacuate((StgClosure *)t);
1461 if (prev) prev->link = t;
1462 flag = rtsTrue;
1463 }
1464 }
1465 }
1466 return flag;
1467 }
1468
1469 /* -----------------------------------------------------------------------------
1470 After GC, the live weak pointer list may have forwarding pointers
1471 on it, because a weak pointer object was evacuated after being
1472 moved to the live weak pointer list. We remove those forwarding
1473 pointers here.
1474
1475 Also, we don't consider weak pointer objects to be reachable, but
1476 we must nevertheless consider them to be "live" and retain them.
1477 Therefore any weak pointer objects which haven't as yet been
1478 evacuated need to be evacuated now.
1479 -------------------------------------------------------------------------- */
1480
1481
1482 static void
1483 mark_weak_ptr_list ( StgWeak **list )
1484 {
1485 StgWeak *w, **last_w;
1486
1487 last_w = list;
1488 for (w = *list; w; w = w->link) {
1489 // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
1490 ASSERT(w->header.info == &stg_DEAD_WEAK_info
1491 || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
1492 w = (StgWeak *)evacuate((StgClosure *)w);
1493 *last_w = w;
1494 last_w = &(w->link);
1495 }
1496 }
1497
1498 /* -----------------------------------------------------------------------------
1499 isAlive determines whether the given closure is still alive (after
1500 a garbage collection) or not. It returns the new address of the
1501 closure if it is alive, or NULL otherwise.
1502
1503 NOTE: Use it before compaction only!
1504 -------------------------------------------------------------------------- */
1505
1506
1507 StgClosure *
1508 isAlive(StgClosure *p)
1509 {
1510 const StgInfoTable *info;
1511 bdescr *bd;
1512
1513 while (1) {
1514
1515 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1516 info = get_itbl(p);
1517
1518 // ignore static closures
1519 //
1520 // ToDo: for static closures, check the static link field.
1521 // Problem here is that we sometimes don't set the link field, eg.
1522 // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
1523 //
1524 if (!HEAP_ALLOCED(p)) {
1525 return p;
1526 }
1527
1528 // ignore closures in generations that we're not collecting.
1529 bd = Bdescr((P_)p);
1530 if (bd->gen_no > N) {
1531 return p;
1532 }
1533
1534 // if it's a pointer into to-space, then we're done
1535 if (bd->flags & BF_EVACUATED) {
1536 return p;
1537 }
1538
1539 // large objects use the evacuated flag
1540 if (bd->flags & BF_LARGE) {
1541 return NULL;
1542 }
1543
1544 // check the mark bit for compacted steps
1545 if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
1546 return p;
1547 }
1548
1549 switch (info->type) {
1550
1551 case IND:
1552 case IND_STATIC:
1553 case IND_PERM:
1554 case IND_OLDGEN: // rely on compatible layout with StgInd
1555 case IND_OLDGEN_PERM:
1556 // follow indirections
1557 p = ((StgInd *)p)->indirectee;
1558 continue;
1559
1560 case EVACUATED:
1561 // alive!
1562 return ((StgEvacuated *)p)->evacuee;
1563
1564 case TSO:
1565 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1566 p = (StgClosure *)((StgTSO *)p)->link;
1567 continue;
1568 }
1569 return NULL;
1570
1571 default:
1572 // dead.
1573 return NULL;
1574 }
1575 }
1576 }
1577
1578 static void
1579 mark_root(StgClosure **root)
1580 {
1581 *root = evacuate(*root);
1582 }
1583
1584 STATIC_INLINE void
1585 upd_evacuee(StgClosure *p, StgClosure *dest)
1586 {
1587 // not true: (ToDo: perhaps it should be)
1588 // ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED);
1589 SET_INFO(p, &stg_EVACUATED_info);
1590 ((StgEvacuated *)p)->evacuee = dest;
1591 }
1592
1593
1594 STATIC_INLINE StgClosure *
1595 copy(StgClosure *src, nat size, step *stp)
1596 {
1597 StgPtr to, from;
1598 nat i;
1599 #ifdef PROFILING
1600 // @LDV profiling
1601 nat size_org = size;
1602 #endif
1603
1604 TICK_GC_WORDS_COPIED(size);
1605 /* Find out where we're going, using the handy "to" pointer in
1606 * the step of the source object. If it turns out we need to
1607 * evacuate to an older generation, adjust it here (see comment
1608 * by evacuate()).
1609 */
1610 if (stp->gen_no < evac_gen) {
1611 if (eager_promotion) {
1612 stp = &generations[evac_gen].steps[0];
1613 } else {
1614 failed_to_evac = rtsTrue;
1615 }
1616 }
1617
1618 /* chain a new block onto the to-space for the destination step if
1619 * necessary.
1620 */
1621 if (stp->hp + size >= stp->hpLim) {
1622 gc_alloc_block(stp);
1623 }
1624
1625 to = stp->hp;
1626 from = (StgPtr)src;
1627 stp->hp = to + size;
1628 for (i = 0; i < size; i++) { // unroll for small i
1629 to[i] = from[i];
1630 }
1631 upd_evacuee((StgClosure *)from,(StgClosure *)to);
1632
1633 #ifdef PROFILING
1634 // We store the size of the just evacuated object in the LDV word so that
1635 // the profiler can guess the position of the next object later.
1636 SET_EVACUAEE_FOR_LDV(from, size_org);
1637 #endif
1638 return (StgClosure *)to;
1639 }
1640
1641 // Same as copy() above, except the object will be allocated in memory
1642 // that will not be scavenged. Used for objects that have no pointer
1643 // fields.
1644 STATIC_INLINE StgClosure *
1645 copy_noscav(StgClosure *src, nat size, step *stp)
1646 {
1647 StgPtr to, from;
1648 nat i;
1649 #ifdef PROFILING
1650 // @LDV profiling
1651 nat size_org = size;
1652 #endif
1653
1654 TICK_GC_WORDS_COPIED(size);
1655 /* Find out where we're going, using the handy "to" pointer in
1656 * the step of the source object. If it turns out we need to
1657 * evacuate to an older generation, adjust it here (see comment
1658 * by evacuate()).
1659 */
1660 if (stp->gen_no < evac_gen) {
1661 if (eager_promotion) {
1662 stp = &generations[evac_gen].steps[0];
1663 } else {
1664 failed_to_evac = rtsTrue;
1665 }
1666 }
1667
1668 /* chain a new block onto the to-space for the destination step if
1669 * necessary.
1670 */
1671 if (stp->scavd_hp + size >= stp->scavd_hpLim) {
1672 gc_alloc_scavd_block(stp);
1673 }
1674
1675 to = stp->scavd_hp;
1676 from = (StgPtr)src;
1677 stp->scavd_hp = to + size;
1678 for (i = 0; i < size; i++) { // unroll for small i
1679 to[i] = from[i];
1680 }
1681 upd_evacuee((StgClosure *)from,(StgClosure *)to);
1682
1683 #ifdef PROFILING
1684 // We store the size of the just evacuated object in the LDV word so that
1685 // the profiler can guess the position of the next object later.
1686 SET_EVACUAEE_FOR_LDV(from, size_org);
1687 #endif
1688 return (StgClosure *)to;
1689 }
1690
1691 /* Special version of copy() for when we only want to copy the info
1692 * pointer of an object, but reserve some padding after it. This is
1693 * used to optimise evacuation of BLACKHOLEs.
1694 */
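/* Schematically, evacuating a BLACKHOLE then looks something like
 *
 *     copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
 *
 * i.e. only the header is copied and the remaining words of the closure
 * are merely reserved as (dead) slop.
 */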
1695
1696
1697 static StgClosure *
1698 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
1699 {
1700 P_ dest, to, from;
1701 #ifdef PROFILING
1702 // @LDV profiling
1703 nat size_to_copy_org = size_to_copy;
1704 #endif
1705
1706 TICK_GC_WORDS_COPIED(size_to_copy);
1707 if (stp->gen_no < evac_gen) {
1708 if (eager_promotion) {
1709 stp = &generations[evac_gen].steps[0];
1710 } else {
1711 failed_to_evac = rtsTrue;
1712 }
1713 }
1714
1715 if (stp->hp + size_to_reserve >= stp->hpLim) {
1716 gc_alloc_block(stp);
1717 }
1718
1719 for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
1720 *to++ = *from++;
1721 }
1722
1723 dest = stp->hp;
1724 stp->hp += size_to_reserve;
1725 upd_evacuee(src,(StgClosure *)dest);
1726 #ifdef PROFILING
1727 // We store the size of the just evacuated object in the LDV word so that
1728 // the profiler can guess the position of the next object later.
1729 // size_to_copy_org is wrong because the closure already occupies size_to_reserve
1730 // words.
1731 SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
1732 // fill the slop
1733 if (size_to_reserve - size_to_copy_org > 0)
1734 LDV_FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
1735 #endif
1736 return (StgClosure *)dest;
1737 }
1738
1739
1740 /* -----------------------------------------------------------------------------
1741 Evacuate a large object
1742
1743 This just consists of removing the object from the (doubly-linked)
1744 step->large_objects list, and linking it on to the (singly-linked)
1745 step->new_large_objects list, from where it will be scavenged later.
1746
1747 Convention: bd->flags has BF_EVACUATED set for a large object
1748 that has been evacuated, or unset otherwise.
1749 -------------------------------------------------------------------------- */
1750
1751
1752 STATIC_INLINE void
1753 evacuate_large(StgPtr p)
1754 {
1755 bdescr *bd = Bdescr(p);
1756 step *stp;
1757
1758 // object must be at the beginning of the block (or be a ByteArray)
1759 ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
1760 (((W_)p & BLOCK_MASK) == 0));
1761
1762 // already evacuated?
1763 if (bd->flags & BF_EVACUATED) {
1764 /* Don't forget to set the failed_to_evac flag if we didn't get
1765 * the desired destination (see comments in evacuate()).
1766 */
1767 if (bd->gen_no < evac_gen) {
1768 failed_to_evac = rtsTrue;
1769 TICK_GC_FAILED_PROMOTION();
1770 }
1771 return;
1772 }
1773
1774 stp = bd->step;
1775 // remove from large_object list
1776 if (bd->u.back) {
1777 bd->u.back->link = bd->link;
1778 } else { // first object in the list
1779 stp->large_objects = bd->link;
1780 }
1781 if (bd->link) {
1782 bd->link->u.back = bd->u.back;
1783 }
1784
1785 /* link it on to the evacuated large object list of the destination step
1786 */
1787 stp = bd->step->to;
1788 if (stp->gen_no < evac_gen) {
1789 if (eager_promotion) {
1790 stp = &generations[evac_gen].steps[0];
1791 } else {
1792 failed_to_evac = rtsTrue;
1793 }
1794 }
1795
1796 bd->step = stp;
1797 bd->gen_no = stp->gen_no;
1798 bd->link = stp->new_large_objects;
1799 stp->new_large_objects = bd;
1800 bd->flags |= BF_EVACUATED;
1801 }
1802
1803 /* -----------------------------------------------------------------------------
1804 Evacuate
1805
1806 This is called (eventually) for every live object in the system.
1807
1808 The caller to evacuate specifies a desired generation in the
1809 evac_gen global variable. The following conditions apply to
1810 evacuating an object which resides in generation M when we're
1811 collecting up to generation N
1812
1813 if M >= evac_gen
1814 if M > N do nothing
1815 else evac to step->to
1816
1817 if M < evac_gen evac to evac_gen, step 0
1818
1819 if the object is already evacuated, then we check which generation
1820 it now resides in.
1821
1822 if M >= evac_gen do nothing
1823 if M < evac_gen set failed_to_evac flag to indicate that we
1824 didn't manage to evacuate this object into evac_gen.
1825
1826
1827 OPTIMISATION NOTES:
1828
1829 evacuate() is the single most important function performance-wise
1830 in the GC. Various things have been tried to speed it up, but as
1831 far as I can tell the code generated by gcc 3.2 with -O2 is about
1832 as good as it's going to get. We pass the argument to evacuate()
1833 in a register using the 'regparm' attribute (see the prototype for
1834 evacuate() near the top of this file).
1835
1836 Changing evacuate() to take an (StgClosure **) rather than
1837 returning the new pointer seems attractive, because we can avoid
1838 writing back the pointer when it hasn't changed (eg. for a static
1839 object, or an object in a generation > N). However, I tried it and
1840 it doesn't help. One reason is that the (StgClosure **) pointer
1841 gets spilled to the stack inside evacuate(), resulting in far more
1842 extra reads/writes than we save.
1843 -------------------------------------------------------------------------- */
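/* Worked example: while scavenging generation 2's mutable list we have
 * evac_gen == 2.  Evacuating an object that lives in generation 0 then
 * copies it straight into generation 2 if eager_promotion is on; otherwise
 * it is copied to its ordinary to-step and failed_to_evac is set, so the
 * caller records an entry on generation 2's mutable list for the resulting
 * old-to-young pointer.
 */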
1844
1845 REGPARM1 static StgClosure *
1846 evacuate(StgClosure *q)
1847 {
1848 #if defined(PAR)
1849 StgClosure *to;
1850 #endif
1851 bdescr *bd = NULL;
1852 step *stp;
1853 const StgInfoTable *info;
1854
1855 loop:
1856 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
1857
1858 if (!HEAP_ALLOCED(q)) {
1859
1860 if (!major_gc) return q;
1861
1862 info = get_itbl(q);
1863 switch (info->type) {
1864
1865 case THUNK_STATIC:
1866 if (info->srt_bitmap != 0 &&
1867 *THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1868 *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1869 static_objects = (StgClosure *)q;
1870 }
1871 return q;
1872
1873 case FUN_STATIC:
1874 if (info->srt_bitmap != 0 &&
1875 *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1876 *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1877 static_objects = (StgClosure *)q;
1878 }
1879 return q;
1880
1881 case IND_STATIC:
1882 /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
1883 * on the CAF list, so don't do anything with it here (we'll
1884 * scavenge it later).
1885 */
1886 if (((StgIndStatic *)q)->saved_info == NULL
1887 && *IND_STATIC_LINK((StgClosure *)q) == NULL) {
1888 *IND_STATIC_LINK((StgClosure *)q) = static_objects;
1889 static_objects = (StgClosure *)q;
1890 }
1891 return q;
1892
1893 case CONSTR_STATIC:
1894 if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
1895 *STATIC_LINK(info,(StgClosure *)q) = static_objects;
1896 static_objects = (StgClosure *)q;
1897 }
1898 return q;
1899
1900 case CONSTR_INTLIKE:
1901 case CONSTR_CHARLIKE:
1902 case CONSTR_NOCAF_STATIC:
1903 /* no need to put these on the static linked list, they don't need
1904 * to be scavenged.
1905 */
1906 return q;
1907
1908 default:
1909 barf("evacuate(static): strange closure type %d", (int)(info->type));
1910 }
1911 }
1912
1913 bd = Bdescr((P_)q);
1914
1915 if (bd->gen_no > N) {
1916 /* Can't evacuate this object, because it's in a generation
1917 * older than the ones we're collecting. Let's hope that it's
1918 * in evac_gen or older, or we will have to arrange to track
1919 * this pointer using the mutable list.
1920 */
1921 if (bd->gen_no < evac_gen) {
1922 // nope
1923 failed_to_evac = rtsTrue;
1924 TICK_GC_FAILED_PROMOTION();
1925 }
1926 return q;
1927 }
1928
1929 if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {
1930
1931 /* pointer into to-space: just return it. This normally
1932      * shouldn't happen, but allowing it makes certain things
1933      * slightly easier (eg. the mutable list can contain the same
1934      * object twice).
1935 */
1936 if (bd->flags & BF_EVACUATED) {
1937 if (bd->gen_no < evac_gen) {
1938 failed_to_evac = rtsTrue;
1939 TICK_GC_FAILED_PROMOTION();
1940 }
1941 return q;
1942 }
1943
1944 /* evacuate large objects by re-linking them onto a different list.
1945 */
1946 if (bd->flags & BF_LARGE) {
1947 info = get_itbl(q);
1948 if (info->type == TSO &&
1949 ((StgTSO *)q)->what_next == ThreadRelocated) {
1950 q = (StgClosure *)((StgTSO *)q)->link;
1951 goto loop;
1952 }
1953 evacuate_large((P_)q);
1954 return q;
1955 }
1956
1957 /* If the object is in a step that we're compacting, then we
1958 * need to use an alternative evacuate procedure.
1959 */
1960 if (bd->flags & BF_COMPACTED) {
1961 if (!is_marked((P_)q,bd)) {
1962 mark((P_)q,bd);
1963 if (mark_stack_full()) {
1964 mark_stack_overflowed = rtsTrue;
1965 reset_mark_stack();
1966 }
1967 push_mark_stack((P_)q);
1968 }
1969 return q;
1970 }
1971 }
1972
1973 stp = bd->step->to;
1974
1975 info = get_itbl(q);
1976
1977 switch (info->type) {
1978
1979 case MUT_VAR_CLEAN:
1980 case MUT_VAR_DIRTY:
1981 case MVAR:
1982 return copy(q,sizeW_fromITBL(info),stp);
1983
1984 case CONSTR_0_1:
1985 {
1986 StgWord w = (StgWord)q->payload[0];
1987 if (q->header.info == Czh_con_info &&
1988 // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
1989 (StgChar)w <= MAX_CHARLIKE) {
1990 return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
1991 }
1992 if (q->header.info == Izh_con_info &&
1993 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
1994 return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
1995 }
1996 // else
1997 return copy_noscav(q,sizeofW(StgHeader)+1,stp);
1998 }
1999
2000 case FUN_0_1:
2001 case FUN_1_0:
2002 case CONSTR_1_0:
2003 return copy(q,sizeofW(StgHeader)+1,stp);
2004
2005 case THUNK_1_0:
2006 case THUNK_0_1:
2007 return copy(q,sizeofW(StgThunk)+1,stp);
2008
2009 case THUNK_1_1:
2010 case THUNK_2_0:
2011 case THUNK_0_2:
2012 #ifdef NO_PROMOTE_THUNKS
2013 if (bd->gen_no == 0 &&
2014 bd->step->no != 0 &&
2015 bd->step->no == generations[bd->gen_no].n_steps-1) {
2016 stp = bd->step;
2017 }
2018 #endif
2019 return copy(q,sizeofW(StgThunk)+2,stp);
2020
2021 case FUN_1_1:
2022 case FUN_2_0:
2023 case CONSTR_1_1:
2024 case CONSTR_2_0:
2025 case FUN_0_2:
2026 return copy(q,sizeofW(StgHeader)+2,stp);
2027
2028 case CONSTR_0_2:
2029 return copy_noscav(q,sizeofW(StgHeader)+2,stp);
2030
2031 case THUNK:
2032 return copy(q,thunk_sizeW_fromITBL(info),stp);
2033
2034 case FUN:
2035 case CONSTR:
2036 case IND_PERM:
2037 case IND_OLDGEN_PERM:
2038 case WEAK:
2039 case STABLE_NAME:
2040 return copy(q,sizeW_fromITBL(info),stp);
2041
2042 case BCO:
2043 return copy(q,bco_sizeW((StgBCO *)q),stp);
2044
2045 case CAF_BLACKHOLE:
2046 case SE_CAF_BLACKHOLE:
2047 case SE_BLACKHOLE:
2048 case BLACKHOLE:
2049 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
2050
2051 case THUNK_SELECTOR:
2052 {
2053 StgClosure *p;
2054 const StgInfoTable *info_ptr;
2055
2056 if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
2057 return copy(q,THUNK_SELECTOR_sizeW(),stp);
2058 }
2059
2060 // stashed away for LDV profiling, see below
2061 info_ptr = q->header.info;
2062
2063 p = eval_thunk_selector(info->layout.selector_offset,
2064 (StgSelector *)q);
2065
2066 if (p == NULL) {
2067 return copy(q,THUNK_SELECTOR_sizeW(),stp);
2068 } else {
2069 StgClosure *val;
2070 // q is still BLACKHOLE'd.
2071 thunk_selector_depth++;
2072 val = evacuate(p);
2073 thunk_selector_depth--;
2074
2075 #ifdef PROFILING
2076 // For the purposes of LDV profiling, we have destroyed
2077 // the original selector thunk.
2078 SET_INFO(q, info_ptr);
2079 LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(q);
2080 #endif
2081
2082 // Update the THUNK_SELECTOR with an indirection to the
2083 // EVACUATED closure now at p. Why do this rather than
2084 // upd_evacuee(q,p)? Because we have an invariant that an
2085 // EVACUATED closure always points to an object in the
2086 // same or an older generation (required by the short-cut
2087 // test in the EVACUATED case, below).
2088 SET_INFO(q, &stg_IND_info);
2089 ((StgInd *)q)->indirectee = p;
2090
2091 // For the purposes of LDV profiling, we have created an
2092 // indirection.
2093 LDV_RECORD_CREATE(q);
2094
2095 return val;
2096 }
2097 }
2098
2099 case IND:
2100 case IND_OLDGEN:
2101 // follow chains of indirections, don't evacuate them
2102 q = ((StgInd*)q)->indirectee;
2103 goto loop;
2104
2105 case RET_BCO:
2106 case RET_SMALL:
2107 case RET_VEC_SMALL:
2108 case RET_BIG:
2109 case RET_VEC_BIG:
2110 case RET_DYN:
2111 case UPDATE_FRAME:
2112 case STOP_FRAME:
2113 case CATCH_FRAME:
2114 case CATCH_STM_FRAME:
2115 case CATCH_RETRY_FRAME:
2116 case ATOMICALLY_FRAME:
2117 // shouldn't see these
2118 barf("evacuate: stack frame at %p\n", q);
2119
2120 case PAP:
2121 return copy(q,pap_sizeW((StgPAP*)q),stp);
2122
2123 case AP:
2124 return copy(q,ap_sizeW((StgAP*)q),stp);
2125
2126 case AP_STACK:
2127 return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);
2128
2129 case EVACUATED:
2130 /* Already evacuated, just return the forwarding address.
2131 * HOWEVER: if the requested destination generation (evac_gen) is
2132 * older than the actual generation (because the object was
2133 * already evacuated to a younger generation) then we have to
2134 * set the failed_to_evac flag to indicate that we couldn't
2135 * manage to promote the object to the desired generation.
2136 */
2137 /*
2138 * Optimisation: the check is fairly expensive, but we can often
2139 * shortcut it if either the required generation is 0, or the
2140 * current object (the EVACUATED) is in a high enough generation.
2141 * We know that an EVACUATED always points to an object in the
2142 * same or an older generation. stp is the lowest step that the
2143 * current object would be evacuated to, so we only do the full
2144 * check if stp is too low.
2145 */
2146 if (evac_gen > 0 && stp->gen_no < evac_gen) { // optimisation
2147 StgClosure *p = ((StgEvacuated*)q)->evacuee;
2148 if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
2149 failed_to_evac = rtsTrue;
2150 TICK_GC_FAILED_PROMOTION();
2151 }
2152 }
2153 return ((StgEvacuated*)q)->evacuee;
2154
2155 case ARR_WORDS:
2156 // just copy the block
2157 return copy_noscav(q,arr_words_sizeW((StgArrWords *)q),stp);
2158
2159 case MUT_ARR_PTRS_CLEAN:
2160 case MUT_ARR_PTRS_DIRTY:
2161 case MUT_ARR_PTRS_FROZEN:
2162 case MUT_ARR_PTRS_FROZEN0:
2163 // just copy the block
2164 return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
2165
2166 case TSO:
2167 {
2168 StgTSO *tso = (StgTSO *)q;
2169
2170 /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
2171 */
2172 if (tso->what_next == ThreadRelocated) {
2173 q = (StgClosure *)tso->link;
2174 goto loop;
2175 }
2176
2177 /* To evacuate a small TSO, we need to relocate the update frame
2178 * list it contains.
2179 */
2180 {
2181 StgTSO *new_tso;
2182 StgPtr p, q;
2183
2184 new_tso = (StgTSO *)copyPart((StgClosure *)tso,
2185 tso_sizeW(tso),
2186 sizeofW(StgTSO), stp);
2187 move_TSO(tso, new_tso);
2188 for (p = tso->sp, q = new_tso->sp;
2189 p < tso->stack+tso->stack_size;) {
2190 *q++ = *p++;
2191 }
2192
2193 return (StgClosure *)new_tso;
2194 }
2195 }
2196
2197 #if defined(PAR)
2198 case RBH:
2199 {
2200 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
2201 to = copy(q,BLACKHOLE_sizeW(),stp);
2202 //ToDo: derive size etc from reverted IP
2203 //to = copy(q,size,stp);
2204 debugTrace(DEBUG_gc, "evacuate: RBH %p (%s) to %p (%s)",
2205 q, info_type(q), to, info_type(to));
2206 return to;
2207 }
2208
2209 case BLOCKED_FETCH:
2210     ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
2211 to = copy(q,sizeofW(StgBlockedFetch),stp);
2212 debugTrace(DEBUG_gc, "evacuate: %p (%s) to %p (%s)",
2213 q, info_type(q), to, info_type(to));
2214 return to;
2215
2216 # ifdef DIST
2217 case REMOTE_REF:
2218 # endif
2219 case FETCH_ME:
2220     ASSERT(sizeofW(StgFetchMe) >= MIN_PAYLOAD_SIZE);
2221 to = copy(q,sizeofW(StgFetchMe),stp);
2222 debugTrace(DEBUG_gc, "evacuate: %p (%s) to %p (%s)",
2223 	       q, info_type(q), to, info_type(to));
2224 return to;
2225
2226 case FETCH_ME_BQ:
2227     ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_PAYLOAD_SIZE);
2228 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
2229 debugTrace(DEBUG_gc, "evacuate: %p (%s) to %p (%s)",
2230 	       q, info_type(q), to, info_type(to));
2231 return to;
2232 #endif
2233
2234 case TREC_HEADER:
2235 return copy(q,sizeofW(StgTRecHeader),stp);
2236
2237 case TVAR_WAIT_QUEUE:
2238 return copy(q,sizeofW(StgTVarWaitQueue),stp);
2239
2240 case TVAR:
2241 return copy(q,sizeofW(StgTVar),stp);
2242
2243 case TREC_CHUNK:
2244 return copy(q,sizeofW(StgTRecChunk),stp);
2245
2246 default:
2247 barf("evacuate: strange closure type %d", (int)(info->type));
2248 }
2249
2250 barf("evacuate");
2251 }
2252
2253 /* -----------------------------------------------------------------------------
2254 Evaluate a THUNK_SELECTOR if possible.
2255
2256 returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or
2257 a closure pointer if we evaluated it and this is the result. Note
2258 that "evaluating" the THUNK_SELECTOR doesn't necessarily mean
2259 reducing it to HNF, just that we have eliminated the selection.
2260 The result might be another thunk, or even another THUNK_SELECTOR.
2261
2262 If the return value is non-NULL, the original selector thunk has
2263 been BLACKHOLE'd, and should be updated with an indirection or a
2264 forwarding pointer. If the return value is NULL, then the selector
2265 thunk is unchanged.
2266
2267 ***
2268 ToDo: the treatment of THUNK_SELECTORS could be improved in the
2269 following way (from a suggestion by Ian Lynagh):
2270
2271 We can have a chain like this:
2272
2273 sel_0 --> (a,b)
2274 |
2275 |-----> sel_0 --> (a,b)
2276 |
2277 |-----> sel_0 --> ...
2278
2279 and the depth limit means we don't go all the way to the end of the
2280 chain, which results in a space leak. This affects the recursive
2281 call to evacuate() in the THUNK_SELECTOR case in evacuate(): *not*
2282 the recursive call to eval_thunk_selector() in
2283 eval_thunk_selector().
2284
2285 We could eliminate the depth bound in this case, in the following
2286 way:
2287
2288 - traverse the chain once to discover the *value* of the
2289 THUNK_SELECTOR. Mark all THUNK_SELECTORS that we
2290 visit on the way as having been visited already (somehow).
2291
2292 - in a second pass, traverse the chain again updating all
2293        THUNK_SELECTORS that we find on the way with indirections to
2294 the value.
2295
2296 - if we encounter a "marked" THUNK_SELECTOR in a normal
2297        evacuate(), we know it can't be updated so just evac it.
2298
2299 Program that illustrates the problem:
2300
2301 foo [] = ([], [])
2302 foo (x:xs) = let (ys, zs) = foo xs
2303 in if x >= 0 then (x:ys, zs) else (ys, x:zs)
2304
2305 main = bar [1..(100000000::Int)]
2306 bar xs = (\(ys, zs) -> print ys >> print zs) (foo xs)
2307
2308 -------------------------------------------------------------------------- */
2309
2310 static inline rtsBool
2311 is_to_space ( StgClosure *p )
2312 {
2313 bdescr *bd;
2314
2315 bd = Bdescr((StgPtr)p);
2316 if (HEAP_ALLOCED(p) &&
2317 ((bd->flags & BF_EVACUATED)
2318 || ((bd->flags & BF_COMPACTED) &&
2319 is_marked((P_)p,bd)))) {
2320 return rtsTrue;
2321 } else {
2322 return rtsFalse;
2323 }
2324 }
2325
2326 static StgClosure *
2327 eval_thunk_selector( nat field, StgSelector * p )
2328 {
2329 StgInfoTable *info;
2330 const StgInfoTable *info_ptr;
2331 StgClosure *selectee;
2332
2333 selectee = p->selectee;
2334
2335 // Save the real info pointer (NOTE: not the same as get_itbl()).
2336 info_ptr = p->header.info;
2337
2338 // If the THUNK_SELECTOR is in a generation that we are not
2339 // collecting, then bail out early. We won't be able to save any
2340 // space in any case, and updating with an indirection is trickier
2341 // in an old gen.
2342 if (Bdescr((StgPtr)p)->gen_no > N) {
2343 return NULL;
2344 }
2345
2346 // BLACKHOLE the selector thunk, since it is now under evaluation.
2347 // This is important to stop us going into an infinite loop if
2348 // this selector thunk eventually refers to itself.
2349 SET_INFO(p,&stg_BLACKHOLE_info);
2350
2351 selector_loop:
2352
2353 // We don't want to end up in to-space, because this causes
2354 // problems when the GC later tries to evacuate the result of
2355 // eval_thunk_selector(). There are various ways this could
2356 // happen:
2357 //
2358 // 1. following an IND_STATIC
2359 //
2360 // 2. when the old generation is compacted, the mark phase updates
2361 // from-space pointers to be to-space pointers, and we can't
2362 // reliably tell which we're following (eg. from an IND_STATIC).
2363 //
2364 // 3. compacting GC again: if we're looking at a constructor in
2365 // the compacted generation, it might point directly to objects
2366 // in to-space. We must bale out here, otherwise doing the selection
2367 // will result in a to-space pointer being returned.
2368 //
2369 // (1) is dealt with using a BF_EVACUATED test on the
2370 // selectee. (2) and (3): we can tell if we're looking at an
2371 // object in the compacted generation that might point to
2372 // to-space objects by testing that (a) it is BF_COMPACTED, (b)
2373 // the compacted generation is being collected, and (c) the
2374 // object is marked. Only a marked object may have pointers that
2375 // point to to-space objects, because that happens when
2376 // scavenging.
2377 //
2378     // The to-space test is now embodied in the is_to_space() inline
2379 // function, as it is re-used below.
2380 //
2381 if (is_to_space(selectee)) {
2382 goto bale_out;
2383 }
2384
2385 info = get_itbl(selectee);
2386 switch (info->type) {
2387 case CONSTR:
2388 case CONSTR_1_0:
2389 case CONSTR_0_1:
2390 case CONSTR_2_0:
2391 case CONSTR_1_1:
2392 case CONSTR_0_2:
2393 case CONSTR_STATIC:
2394 case CONSTR_NOCAF_STATIC:
2395 	  // check that the field number is in range
2396 ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
2397 info->layout.payload.nptrs));
2398
2399 // Select the right field from the constructor, and check
2400 // that the result isn't in to-space. It might be in
2401 // to-space if, for example, this constructor contains
2402 // pointers to younger-gen objects (and is on the mut-once
2403 // list).
2404 //
2405 {
2406 StgClosure *q;
2407 q = selectee->payload[field];
2408 if (is_to_space(q)) {
2409 goto bale_out;
2410 } else {
2411 return q;
2412 }
2413 }
2414
2415 case IND:
2416 case IND_PERM:
2417 case IND_OLDGEN:
2418 case IND_OLDGEN_PERM:
2419 case IND_STATIC:
2420 selectee = ((StgInd *)selectee)->indirectee;
2421 goto selector_loop;
2422
2423 case EVACUATED:
2424 // We don't follow pointers into to-space; the constructor
2425 // has already been evacuated, so we won't save any space
2426 // leaks by evaluating this selector thunk anyhow.
2427 break;
2428
2429 case THUNK_SELECTOR:
2430 {
2431 StgClosure *val;
2432
2433 // check that we don't recurse too much, re-using the
2434 // depth bound also used in evacuate().
2435 if (thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
2436 break;
2437 }
2438 thunk_selector_depth++;
2439
2440 val = eval_thunk_selector(info->layout.selector_offset,
2441 (StgSelector *)selectee);
2442
2443 thunk_selector_depth--;
2444
2445 if (val == NULL) {
2446 break;
2447 } else {
2448 // We evaluated this selector thunk, so update it with
2449 // an indirection. NOTE: we don't use UPD_IND here,
2450 // because we are guaranteed that p is in a generation
2451 // that we are collecting, and we never want to put the
2452 // indirection on a mutable list.
2453 #ifdef PROFILING
2454 // For the purposes of LDV profiling, we have destroyed
2455 // the original selector thunk.
2456 SET_INFO(p, info_ptr);
2457 LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(selectee);
2458 #endif
2459 ((StgInd *)selectee)->indirectee = val;
2460 SET_INFO(selectee,&stg_IND_info);
2461
2462 // For the purposes of LDV profiling, we have created an
2463 // indirection.
2464 LDV_RECORD_CREATE(selectee);
2465
2466 selectee = val;
2467 goto selector_loop;
2468 }
2469 }
2470
2471 case AP:
2472 case AP_STACK:
2473 case THUNK:
2474 case THUNK_1_0:
2475 case THUNK_0_1:
2476 case THUNK_2_0:
2477 case THUNK_1_1:
2478 case THUNK_0_2:
2479 case THUNK_STATIC:
2480 case CAF_BLACKHOLE:
2481 case SE_CAF_BLACKHOLE:
2482 case SE_BLACKHOLE:
2483 case BLACKHOLE:
2484 #if defined(PAR)
2485 case RBH:
2486 case BLOCKED_FETCH:
2487 # ifdef DIST
2488 case REMOTE_REF:
2489 # endif
2490 case FETCH_ME:
2491 case FETCH_ME_BQ:
2492 #endif
2493 // not evaluated yet
2494 break;
2495
2496 default:
2497 barf("eval_thunk_selector: strange selectee %d",
2498 (int)(info->type));
2499 }
2500
2501 bale_out:
2502 // We didn't manage to evaluate this thunk; restore the old info pointer
2503 SET_INFO(p, info_ptr);
2504 return NULL;
2505 }
2506
2507 /* -----------------------------------------------------------------------------
2508 move_TSO is called to update the TSO structure after it has been
2509 moved from one place to another.
2510 -------------------------------------------------------------------------- */
2511
2512 void
2513 move_TSO (StgTSO *src, StgTSO *dest)
2514 {
2515 ptrdiff_t diff;
2516
2517 // relocate the stack pointer...
2518 diff = (StgPtr)dest - (StgPtr)src; // In *words*
2519 dest->sp = (StgPtr)dest->sp + diff;
2520 }
2521
2522 /* Similar to scavenge_large_bitmap(), but we don't write back the
2523 * pointers we get back from evacuate().
2524 */
2525 static void
2526 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
2527 {
2528 nat i, b, size;
2529 StgWord bitmap;
2530 StgClosure **p;
2531
2532 b = 0;
2533 bitmap = large_srt->l.bitmap[b];
2534 size = (nat)large_srt->l.size;
2535 p = (StgClosure **)large_srt->srt;
2536 for (i = 0; i < size; ) {
2537 if ((bitmap & 1) != 0) {
2538 evacuate(*p);
2539 }
2540 i++;
2541 p++;
2542 if (i % BITS_IN(W_) == 0) {
2543 b++;
2544 bitmap = large_srt->l.bitmap[b];
2545 } else {
2546 bitmap = bitmap >> 1;
2547 }
2548 }
2549 }
2550
2551 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
2552 * srt field in the info table. That's ok, because we'll
2553 * never dereference it.
2554 */
2555 STATIC_INLINE void
2556 scavenge_srt (StgClosure **srt, nat srt_bitmap)
2557 {
2558 nat bitmap;
2559 StgClosure **p;
2560
2561 bitmap = srt_bitmap;
2562 p = srt;
2563
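  // An srt_bitmap of all ones (as a halfword) is the sentinel for a
  // large SRT: the real bitmap lives out of line in an StgLargeSRT.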
2564 if (bitmap == (StgHalfWord)(-1)) {
2565 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
2566 return;
2567 }
2568
2569 while (bitmap != 0) {
2570 if ((bitmap & 1) != 0) {
2571 #ifdef ENABLE_WIN32_DLL_SUPPORT
2572 // Special-case to handle references to closures hiding out in DLLs, since
2573 // double indirections required to get at those. The code generator knows
2574 // which is which when generating the SRT, so it stores the (indirect)
2575 // reference to the DLL closure in the table by first adding one to it.
2576 // We check for this here, and undo the addition before evacuating it.
2577 //
2578 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
2579 // closure that's fixed at link-time, and no extra magic is required.
2580 	  if ( (unsigned long)(*p) & 0x1 ) {
2581 	      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *p) & ~0x1)));
2582 } else {
2583 evacuate(*p);
2584 }
2585 #else
2586 evacuate(*p);
2587 #endif
2588 }
2589 p++;
2590 bitmap = bitmap >> 1;
2591 }
2592 }
2593
2594
2595 STATIC_INLINE void
2596 scavenge_thunk_srt(const StgInfoTable *info)
2597 {
2598 StgThunkInfoTable *thunk_info;
2599
2600 if (!major_gc) return;
2601
2602 thunk_info = itbl_to_thunk_itbl(info);
2603 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
2604 }
2605
2606 STATIC_INLINE void
2607 scavenge_fun_srt(const StgInfoTable *info)
2608 {
2609 StgFunInfoTable *fun_info;
2610
2611 if (!major_gc) return;
2612
2613 fun_info = itbl_to_fun_itbl(info);
2614 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
2615 }
2616
2617 /* -----------------------------------------------------------------------------
2618 Scavenge a TSO.
2619 -------------------------------------------------------------------------- */
2620
2621 static void
2622 scavengeTSO (StgTSO *tso)
2623 {
2624 if ( tso->why_blocked == BlockedOnMVar
2625 || tso->why_blocked == BlockedOnBlackHole
2626 || tso->why_blocked == BlockedOnException
2627 #if defined(PAR)
2628 || tso->why_blocked == BlockedOnGA
2629 || tso->why_blocked == BlockedOnGA_NoSend
2630 #endif
2631 ) {
2632 tso->block_info.closure = evacuate(tso->block_info.closure);
2633 }
2634 if ( tso->blocked_exceptions != NULL ) {
2635 tso->blocked_exceptions =
2636 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
2637 }
2638
2639 // We don't always chase the link field: TSOs on the blackhole
2640 // queue are not automatically alive, so the link field is a
2641 // "weak" pointer in that case.
2642 if (tso->why_blocked != BlockedOnBlackHole) {
2643 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
2644 }
2645
2646     // scavenge the current transaction record
2647 tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
2648
2649 // scavenge this thread's stack
2650 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2651 }
2652
2653 /* -----------------------------------------------------------------------------
2654 Blocks of function args occur on the stack (at the top) and
2655 in PAPs.
2656 -------------------------------------------------------------------------- */
2657
2658 STATIC_INLINE StgPtr
2659 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
2660 {
2661 StgPtr p;
2662 StgWord bitmap;
2663 nat size;
2664
2665 p = (StgPtr)args;
2666 switch (fun_info->f.fun_type) {
2667 case ARG_GEN:
2668 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
2669 size = BITMAP_SIZE(fun_info->f.b.bitmap);
2670 goto small_bitmap;
2671 case ARG_GEN_BIG:
2672 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
2673 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
2674 p += size;
2675 break;
2676 default:
2677 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
2678 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
2679 small_bitmap:
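 	// in an argument bitmap a set bit marks a non-pointer word, so
 	// only the words whose bit is clear are followed here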
2680 while (size > 0) {
2681 if ((bitmap & 1) == 0) {
2682 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2683 }
2684 p++;
2685 bitmap = bitmap >> 1;
2686 size--;
2687 }
2688 break;
2689 }
2690 return p;
2691 }
2692
2693 STATIC_INLINE StgPtr
2694 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
2695 {
2696 StgPtr p;
2697 StgWord bitmap;
2698 StgFunInfoTable *fun_info;
2699
2700 fun_info = get_fun_itbl(fun);
2701 ASSERT(fun_info->i.type != PAP);
2702 p = (StgPtr)payload;
2703
2704 switch (fun_info->f.fun_type) {
2705 case ARG_GEN:
2706 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
2707 goto small_bitmap;
2708 case ARG_GEN_BIG:
2709 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
2710 p += size;
2711 break;
2712 case ARG_BCO:
2713 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
2714 p += size;
2715 break;
2716 default:
2717 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
2718 small_bitmap:
2719 while (size > 0) {
2720 if ((bitmap & 1) == 0) {
2721 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2722 }
2723 p++;
2724 bitmap = bitmap >> 1;
2725 size--;
2726 }
2727 break;
2728 }
2729 return p;
2730 }
2731
2732 STATIC_INLINE StgPtr
2733 scavenge_PAP (StgPAP *pap)
2734 {
2735 pap->fun = evacuate(pap->fun);
2736 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
2737 }
2738
2739 STATIC_INLINE StgPtr
2740 scavenge_AP (StgAP *ap)
2741 {
2742 ap->fun = evacuate(ap->fun);
2743 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
2744 }
2745
2746 /* -----------------------------------------------------------------------------
2747 Scavenge a given step until there are no more objects in this step
2748 to scavenge.
2749
2750 evac_gen is set by the caller to be either zero (for a step in a
2751 generation < N) or G where G is the generation of the step being
2752 scavenged.
2753
2754 We sometimes temporarily change evac_gen back to zero if we're
2755 scavenging a mutable object where early promotion isn't such a good
2756 idea.
2757 -------------------------------------------------------------------------- */
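/* A minimal sketch (illustration only, not compiled) of the evac_gen
 * save/restore pattern described above; this is the shape used by the
 * MVAR, TVAR, TVAR_WAIT_QUEUE and TREC_* cases below, where "obj" and
 * "field" stand for the mutable object in hand.
 */
#if 0
    evac_gen = 0;                       // don't force early promotion
    obj->field = evacuate(obj->field);
    evac_gen = saved_evac_gen;
    failed_to_evac = rtsTrue;           // keep the object on the mutable list
#endif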
2758
2759 static void
2760 scavenge(step *stp)
2761 {
2762 StgPtr p, q;
2763 StgInfoTable *info;
2764 bdescr *bd;
2765 nat saved_evac_gen = evac_gen;
2766
2767 p = stp->scan;
2768 bd = stp->scan_bd;
2769
2770 failed_to_evac = rtsFalse;
2771
2772 /* scavenge phase - standard breadth-first scavenging of the
2773 * evacuated objects
2774 */
2775
2776 while (bd != stp->hp_bd || p < stp->hp) {
2777
2778 // If we're at the end of this block, move on to the next block
2779 if (bd != stp->hp_bd && p == bd->free) {
2780 bd = bd->link;
2781 p = bd->start;
2782 continue;
2783 }
2784
2785 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
2786 info = get_itbl((StgClosure *)p);
2787
2788 ASSERT(thunk_selector_depth == 0);
2789
2790 q = p;
2791 switch (info->type) {
2792
2793 case MVAR:
2794 {
2795 StgMVar *mvar = ((StgMVar *)p);
2796 evac_gen = 0;
2797 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
2798 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
2799 mvar->value = evacuate((StgClosure *)mvar->value);
2800 evac_gen = saved_evac_gen;
2801 failed_to_evac = rtsTrue; // mutable.
2802 p += sizeofW(StgMVar);
2803 break;
2804 }
2805
2806 case FUN_2_0:
2807 scavenge_fun_srt(info);
2808 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2809 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2810 p += sizeofW(StgHeader) + 2;
2811 break;
2812
2813 case THUNK_2_0:
2814 scavenge_thunk_srt(info);
2815 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
2816 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2817 p += sizeofW(StgThunk) + 2;
2818 break;
2819
2820 case CONSTR_2_0:
2821 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2822 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2823 p += sizeofW(StgHeader) + 2;
2824 break;
2825
2826 case THUNK_1_0:
2827 scavenge_thunk_srt(info);
2828 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2829 p += sizeofW(StgThunk) + 1;
2830 break;
2831
2832 case FUN_1_0:
2833 scavenge_fun_srt(info);
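 	// fall through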
2834 case CONSTR_1_0:
2835 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2836 p += sizeofW(StgHeader) + 1;
2837 break;
2838
2839 case THUNK_0_1:
2840 scavenge_thunk_srt(info);
2841 p += sizeofW(StgThunk) + 1;
2842 break;
2843
2844 case FUN_0_1:
2845 scavenge_fun_srt(info);
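 	// fall through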
2846 case CONSTR_0_1:
2847 p += sizeofW(StgHeader) + 1;
2848 break;
2849
2850 case THUNK_0_2:
2851 scavenge_thunk_srt(info);
2852 p += sizeofW(StgThunk) + 2;
2853 break;
2854
2855 case FUN_0_2:
2856 scavenge_fun_srt(info);
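 	// fall through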
2857 case CONSTR_0_2:
2858 p += sizeofW(StgHeader) + 2;
2859 break;
2860
2861 case THUNK_1_1:
2862 scavenge_thunk_srt(info);
2863 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2864 p += sizeofW(StgThunk) + 2;
2865 break;
2866
2867 case FUN_1_1:
2868 scavenge_fun_srt(info);
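 	// fall through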
2869 case CONSTR_1_1:
2870 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2871 p += sizeofW(StgHeader) + 2;
2872 break;
2873
2874 case FUN:
2875 scavenge_fun_srt(info);
2876 goto gen_obj;
2877
2878 case THUNK:
2879 {
2880 StgPtr end;
2881
2882 scavenge_thunk_srt(info);
2883 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
2884 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
2885 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2886 }
2887 p += info->layout.payload.nptrs;
2888 break;
2889 }
2890
2891 gen_obj:
2892 case CONSTR:
2893 case WEAK:
2894 case STABLE_NAME:
2895 {
2896 StgPtr end;
2897
2898 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2899 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2900 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2901 }
2902 p += info->layout.payload.nptrs;
2903 break;
2904 }
2905
2906 case BCO: {
2907 StgBCO *bco = (StgBCO *)p;
2908 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
2909 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
2910 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
2911 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
2912 p += bco_sizeW(bco);
2913 break;
2914 }
2915
2916 case IND_PERM:
2917 if (stp->gen->no != 0) {
2918 #ifdef PROFILING
2919 // @LDV profiling
2920 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
2921 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
2922 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
2923 #endif
2924 //
2925 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
2926 //
2927 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2928
2929 // We pretend that p has just been created.
2930 LDV_RECORD_CREATE((StgClosure *)p);
2931 }
2932 // fall through
2933 case IND_OLDGEN_PERM:
2934 ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
2935 p += sizeofW(StgInd);
2936 break;
2937
2938 case MUT_VAR_CLEAN:
2939 case MUT_VAR_DIRTY: {
2940 rtsBool saved_eager_promotion = eager_promotion;
2941
2942 eager_promotion = rtsFalse;
2943 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2944 eager_promotion = saved_eager_promotion;
2945
2946 if (failed_to_evac) {
2947 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
2948 } else {
2949 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
2950 }
2951 p += sizeofW(StgMutVar);
2952 break;
2953 }
2954
2955 case CAF_BLACKHOLE:
2956 case SE_CAF_BLACKHOLE:
2957 case SE_BLACKHOLE:
2958 case BLACKHOLE:
2959 p += BLACKHOLE_sizeW();
2960 break;
2961
2962 case THUNK_SELECTOR:
2963 {
2964 StgSelector *s = (StgSelector *)p;
2965 s->selectee = evacuate(s->selectee);
2966 p += THUNK_SELECTOR_sizeW();
2967 break;
2968 }
2969
2970 // A chunk of stack saved in a heap object
2971 case AP_STACK:
2972 {
2973 StgAP_STACK *ap = (StgAP_STACK *)p;
2974
2975 ap->fun = evacuate(ap->fun);
2976 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
2977 p = (StgPtr)ap->payload + ap->size;
2978 break;
2979 }
2980
2981 case PAP:
2982 p = scavenge_PAP((StgPAP *)p);
2983 break;
2984
2985 case AP:
2986 p = scavenge_AP((StgAP *)p);
2987 break;
2988
2989 case ARR_WORDS:
2990 // nothing to follow
2991 p += arr_words_sizeW((StgArrWords *)p);
2992 break;
2993
2994 case MUT_ARR_PTRS_CLEAN:
2995 case MUT_ARR_PTRS_DIRTY:
2996 // follow everything
2997 {
2998 StgPtr next;
2999 rtsBool saved_eager;
3000
3001 // We don't eagerly promote objects pointed to by a mutable
3002 // array, but if we find the array only points to objects in
3003 // the same or an older generation, we mark it "clean" and
3004 // avoid traversing it during minor GCs.
3005 saved_eager = eager_promotion;
3006 eager_promotion = rtsFalse;
3007 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3008 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3009 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3010 }
3011 eager_promotion = saved_eager;
3012
3013 if (failed_to_evac) {
3014 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
3015 } else {
3016 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
3017 }
3018
3019 failed_to_evac = rtsTrue; // always put it on the mutable list.
3020 break;
3021 }
3022
3023 case MUT_ARR_PTRS_FROZEN:
3024 case MUT_ARR_PTRS_FROZEN0:
3025 // follow everything
3026 {
3027 StgPtr next;
3028
3029 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3030 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3031 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3032 }
3033
3034 // If we're going to put this object on the mutable list, then
3035 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3036 if (failed_to_evac) {
3037 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3038 } else {
3039 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3040 }
3041 break;
3042 }
3043
3044 case TSO:
3045 {
3046 StgTSO *tso = (StgTSO *)p;
3047 rtsBool saved_eager = eager_promotion;
3048
3049 eager_promotion = rtsFalse;
3050 scavengeTSO(tso);
3051 eager_promotion = saved_eager;
3052
3053 if (failed_to_evac) {
3054 tso->flags |= TSO_DIRTY;
3055 } else {
3056 tso->flags &= ~TSO_DIRTY;
3057 }
3058
3059 failed_to_evac = rtsTrue; // always on the mutable list
3060 p += tso_sizeW(tso);
3061 break;
3062 }
3063
3064 #if defined(PAR)
3065 case RBH:
3066 {
3067 #if 0
3068 nat size, ptrs, nonptrs, vhs;
3069 char str[80];
3070 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3071 #endif
3072 StgRBH *rbh = (StgRBH *)p;
3073 (StgClosure *)rbh->blocking_queue =
3074 evacuate((StgClosure *)rbh->blocking_queue);
3075 failed_to_evac = rtsTrue; // mutable anyhow.
3076 debugTrace(DEBUG_gc, "scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3077 p, info_type(p), (StgClosure *)rbh->blocking_queue);
3078 // ToDo: use size of reverted closure here!
3079 p += BLACKHOLE_sizeW();
3080 break;
3081 }
3082
3083 case BLOCKED_FETCH:
3084 {
3085 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3086 // follow the pointer to the node which is being demanded
3087 (StgClosure *)bf->node =
3088 evacuate((StgClosure *)bf->node);
3089 // follow the link to the rest of the blocking queue
3090 (StgClosure *)bf->link =
3091 evacuate((StgClosure *)bf->link);
3092 	debugTrace(DEBUG_gc, "scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
3093 		   bf, info_type((StgClosure *)bf),
3094 		   bf->node, info_type(bf->node));
3095 p += sizeofW(StgBlockedFetch);
3096 break;
3097 }
3098
3099 #ifdef DIST
3100 case REMOTE_REF:
3101 #endif
3102 case FETCH_ME:
3103 p += sizeofW(StgFetchMe);
3104 break; // nothing to do in this case
3105
3106 case FETCH_ME_BQ:
3107 {
3108 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3109 (StgClosure *)fmbq->blocking_queue =
3110 evacuate((StgClosure *)fmbq->blocking_queue);
3111 debugTrace(DEBUG_gc, "scavenge: %p (%s) exciting, isn't it",
3112 		   p, info_type((StgClosure *)p));
3113 p += sizeofW(StgFetchMeBlockingQueue);
3114 break;
3115 }
3116 #endif
3117
3118 case TVAR_WAIT_QUEUE:
3119 {
3120 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3121 evac_gen = 0;
3122 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3123 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3124 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3125 evac_gen = saved_evac_gen;
3126 failed_to_evac = rtsTrue; // mutable
3127 p += sizeofW(StgTVarWaitQueue);
3128 break;
3129 }
3130
3131 case TVAR:
3132 {
3133 StgTVar *tvar = ((StgTVar *) p);
3134 evac_gen = 0;
3135 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3136 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3137 evac_gen = saved_evac_gen;
3138 failed_to_evac = rtsTrue; // mutable
3139 p += sizeofW(StgTVar);
3140 break;
3141 }
3142
3143 case TREC_HEADER:
3144 {
3145 StgTRecHeader *trec = ((StgTRecHeader *) p);
3146 evac_gen = 0;
3147 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3148 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3149 evac_gen = saved_evac_gen;
3150 failed_to_evac = rtsTrue; // mutable
3151 p += sizeofW(StgTRecHeader);
3152 break;
3153 }
3154
3155 case TREC_CHUNK:
3156 {
3157 StgWord i;
3158 StgTRecChunk *tc = ((StgTRecChunk *) p);
3159 TRecEntry *e = &(tc -> entries[0]);
3160 evac_gen = 0;
3161 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3162 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3163 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3164 e->expected_value = evacuate((StgClosure*)e->expected_value);
3165 e->new_value = evacuate((StgClosure*)e->new_value);
3166 }
3167 evac_gen = saved_evac_gen;
3168 failed_to_evac = rtsTrue; // mutable
3169 p += sizeofW(StgTRecChunk);
3170 break;
3171 }
3172
3173 default:
3174 barf("scavenge: unimplemented/strange closure type %d @ %p",
3175 info->type, p);
3176 }
3177
3178 /*
3179 * We need to record the current object on the mutable list if
3180 * (a) It is actually mutable, or
3181 * (b) It contains pointers to a younger generation.
3182 * Case (b) arises if we didn't manage to promote everything that
3183 * the current object points to into the current generation.
3184 */
3185 if (failed_to_evac) {
3186 failed_to_evac = rtsFalse;
3187 if (stp->gen_no > 0) {
3188 recordMutableGen((StgClosure *)q, stp->gen);
3189 }
3190 }
3191 }
3192
3193 stp->scan_bd = bd;
3194 stp->scan = p;
3195 }
3196
3197 /* -----------------------------------------------------------------------------
3198 Scavenge everything on the mark stack.
3199
3200 This is slightly different from scavenge():
3201 - we don't walk linearly through the objects, so the scavenger
3202 doesn't need to advance the pointer on to the next object.
3203 -------------------------------------------------------------------------- */
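/* A rough outline of the control flow below (illustration only):
 *
 *   linear_scan:
 *     while the mark stack is non-empty: pop a closure, scavenge it,
 *       and set its "scavenged" mark bit;
 *     if the mark stack overflowed earlier, fall back to a linear scan
 *       of the oldest generation's old blocks, pushing each closure
 *       that is marked but not yet scavenged back onto the stack and
 *       jumping back to linear_scan.
 */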
3204
3205 static void
3206 scavenge_mark_stack(void)
3207 {
3208 StgPtr p, q;
3209 StgInfoTable *info;
3210 nat saved_evac_gen;
3211
3212 evac_gen = oldest_gen->no;
3213 saved_evac_gen = evac_gen;
3214
3215 linear_scan:
3216 while (!mark_stack_empty()) {
3217 p = pop_mark_stack();
3218
3219 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3220 info = get_itbl((StgClosure *)p);
3221
3222 q = p;
3223 switch (info->type) {
3224
3225 case MVAR:
3226 {
3227 StgMVar *mvar = ((StgMVar *)p);
3228 evac_gen = 0;
3229 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
3230 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
3231 mvar->value = evacuate((StgClosure *)mvar->value);
3232 evac_gen = saved_evac_gen;
3233 failed_to_evac = rtsTrue; // mutable.
3234 break;
3235 }
3236
3237 case FUN_2_0:
3238 scavenge_fun_srt(info);
3239 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
3240 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3241 break;
3242
3243 case THUNK_2_0:
3244 scavenge_thunk_srt(info);
3245 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
3246 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
3247 break;
3248
3249 case CONSTR_2_0:
3250 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
3251 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3252 break;
3253
3254 case FUN_1_0:
3255 case FUN_1_1:
3256 scavenge_fun_srt(info);
3257 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3258 break;
3259
3260 case THUNK_1_0:
3261 case THUNK_1_1:
3262 scavenge_thunk_srt(info);
3263 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
3264 break;
3265
3266 case CONSTR_1_0:
3267 case CONSTR_1_1:
3268 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3269 break;
3270
3271 case FUN_0_1:
3272 case FUN_0_2:
3273 scavenge_fun_srt(info);
3274 break;
3275
3276 case THUNK_0_1:
3277 case THUNK_0_2:
3278 scavenge_thunk_srt(info);
3279 break;
3280
3281 case CONSTR_0_1:
3282 case CONSTR_0_2:
3283 break;
3284
3285 case FUN:
3286 scavenge_fun_srt(info);
3287 goto gen_obj;
3288
3289 case THUNK:
3290 {
3291 StgPtr end;
3292
3293 scavenge_thunk_srt(info);
3294 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
3295 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
3296 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3297 }
3298 break;
3299 }
3300
3301 gen_obj:
3302 case CONSTR:
3303 case WEAK:
3304 case STABLE_NAME:
3305 {
3306 StgPtr end;
3307
3308 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
3309 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
3310 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3311 }
3312 break;
3313 }
3314
3315 case BCO: {
3316 StgBCO *bco = (StgBCO *)p;
3317 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
3318 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
3319 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
3320 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
3321 break;
3322 }
3323
3324 case IND_PERM:
3325 // don't need to do anything here: the only possible case
3326 // is that we're in a 1-space compacting collector, with
3327 // no "old" generation.
3328 break;
3329
3330 case IND_OLDGEN:
3331 case IND_OLDGEN_PERM:
3332 ((StgInd *)p)->indirectee =
3333 evacuate(((StgInd *)p)->indirectee);
3334 break;
3335
3336 case MUT_VAR_CLEAN:
3337 case MUT_VAR_DIRTY: {
3338 rtsBool saved_eager_promotion = eager_promotion;
3339
3340 eager_promotion = rtsFalse;
3341 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3342 eager_promotion = saved_eager_promotion;
3343
3344 if (failed_to_evac) {
3345 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
3346 } else {
3347 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
3348 }
3349 break;
3350 }
3351
3352 case CAF_BLACKHOLE:
3353 case SE_CAF_BLACKHOLE:
3354 case SE_BLACKHOLE:
3355 case BLACKHOLE:
3356 case ARR_WORDS:
3357 break;
3358
3359 case THUNK_SELECTOR:
3360 {
3361 StgSelector *s = (StgSelector *)p;
3362 s->selectee = evacuate(s->selectee);
3363 break;
3364 }
3365
3366 // A chunk of stack saved in a heap object
3367 case AP_STACK:
3368 {
3369 StgAP_STACK *ap = (StgAP_STACK *)p;
3370
3371 ap->fun = evacuate(ap->fun);
3372 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
3373 break;
3374 }
3375
3376 case PAP:
3377 scavenge_PAP((StgPAP *)p);
3378 break;
3379
3380 case AP:
3381 scavenge_AP((StgAP *)p);
3382 break;
3383
3384 case MUT_ARR_PTRS_CLEAN:
3385 case MUT_ARR_PTRS_DIRTY:
3386 // follow everything
3387 {
3388 StgPtr next;
3389 rtsBool saved_eager;
3390
3391 // We don't eagerly promote objects pointed to by a mutable
3392 // array, but if we find the array only points to objects in
3393 // the same or an older generation, we mark it "clean" and
3394 // avoid traversing it during minor GCs.
3395 saved_eager = eager_promotion;
3396 eager_promotion = rtsFalse;
3397 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3398 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3399 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3400 }
3401 eager_promotion = saved_eager;
3402
3403 if (failed_to_evac) {
3404 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
3405 } else {
3406 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
3407 }
3408
3409 failed_to_evac = rtsTrue; // mutable anyhow.
3410 break;
3411 }
3412
3413 case MUT_ARR_PTRS_FROZEN:
3414 case MUT_ARR_PTRS_FROZEN0:
3415 // follow everything
3416 {
3417 StgPtr next, q = p;
3418
3419 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3420 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3421 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3422 }
3423
3424 // If we're going to put this object on the mutable list, then
3425 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3426 if (failed_to_evac) {
3427 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3428 } else {
3429 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3430 }
3431 break;
3432 }
3433
3434 case TSO:
3435 {
3436 StgTSO *tso = (StgTSO *)p;
3437 rtsBool saved_eager = eager_promotion;
3438
3439 eager_promotion = rtsFalse;
3440 scavengeTSO(tso);
3441 eager_promotion = saved_eager;
3442
3443 if (failed_to_evac) {
3444 tso->flags |= TSO_DIRTY;
3445 } else {
3446 tso->flags &= ~TSO_DIRTY;
3447 }
3448
3449 failed_to_evac = rtsTrue; // always on the mutable list
3450 break;
3451 }
3452
3453 #if defined(PAR)
3454 case RBH:
3455 {
3456 #if 0
3457 nat size, ptrs, nonptrs, vhs;
3458 char str[80];
3459 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3460 #endif
3461 StgRBH *rbh = (StgRBH *)p;
3462 	    rbh->blocking_queue =
3463 		(StgTSO *)evacuate((StgClosure *)rbh->blocking_queue);
3464 failed_to_evac = rtsTrue; // mutable anyhow.
3465 debugTrace(DEBUG_gc, "scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3466 		       p, info_type(p), (StgClosure *)rbh->blocking_queue);
3467 break;
3468 }
3469
3470 case BLOCKED_FETCH:
3471 {
3472 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3473 // follow the pointer to the node which is being demanded
3474 (StgClosure *)bf->node =
3475 evacuate((StgClosure *)bf->node);
3476 // follow the link to the rest of the blocking queue
3477 (StgClosure *)bf->link =
3478 evacuate((StgClosure *)bf->link);
3479 	    debugTrace(DEBUG_gc, "scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
3480 		       bf, info_type((StgClosure *)bf),
3481 		       bf->node, info_type(bf->node));
3482 break;
3483 }
3484
3485 #ifdef DIST
3486 case REMOTE_REF:
3487 #endif
3488 case FETCH_ME:
3489 break; // nothing to do in this case
3490
3491 case FETCH_ME_BQ:
3492 {
3493 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3494 (StgClosure *)fmbq->blocking_queue =
3495 evacuate((StgClosure *)fmbq->blocking_queue);
3496 debugTrace(DEBUG_gc, "scavenge: %p (%s) exciting, isn't it",
3497 		       p, info_type((StgClosure *)p));
3498 break;
3499 }
3500 #endif /* PAR */
3501
3502 case TVAR_WAIT_QUEUE:
3503 {
3504 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3505 evac_gen = 0;
3506 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3507 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3508 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3509 evac_gen = saved_evac_gen;
3510 failed_to_evac = rtsTrue; // mutable
3511 break;
3512 }
3513
3514 case TVAR:
3515 {
3516 StgTVar *tvar = ((StgTVar *) p);
3517 evac_gen = 0;
3518 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3519 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3520 evac_gen = saved_evac_gen;
3521 failed_to_evac = rtsTrue; // mutable
3522 break;
3523 }
3524
3525 case TREC_CHUNK:
3526 {
3527 StgWord i;
3528 StgTRecChunk *tc = ((StgTRecChunk *) p);
3529 TRecEntry *e = &(tc -> entries[0]);
3530 evac_gen = 0;
3531 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3532 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3533 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3534 e->expected_value = evacuate((StgClosure*)e->expected_value);
3535 e->new_value = evacuate((StgClosure*)e->new_value);
3536 }
3537 evac_gen = saved_evac_gen;
3538 failed_to_evac = rtsTrue; // mutable
3539 break;
3540 }
3541
3542 case TREC_HEADER:
3543 {
3544 StgTRecHeader *trec = ((StgTRecHeader *) p);
3545 evac_gen = 0;
3546 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3547 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3548 evac_gen = saved_evac_gen;
3549 failed_to_evac = rtsTrue; // mutable
3550 break;
3551 }
3552
3553 default:
3554 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
3555 info->type, p);
3556 }
3557
3558 if (failed_to_evac) {
3559 failed_to_evac = rtsFalse;
3560 if (evac_gen > 0) {
3561 recordMutableGen((StgClosure *)q, &generations[evac_gen]);
3562 }
3563 }
3564
3565 // mark the next bit to indicate "scavenged"
3566 mark(q+1, Bdescr(q));
3567
3568 } // while (!mark_stack_empty())
3569
3570 // start a new linear scan if the mark stack overflowed at some point
3571 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
3572 debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
3573 mark_stack_overflowed = rtsFalse;
3574 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
3575 oldgen_scan = oldgen_scan_bd->start;
3576 }
3577
3578 if (oldgen_scan_bd) {
3579 // push a new thing on the mark stack
3580 loop:
3581 // find a closure that is marked but not scavenged, and start
3582 // from there.
3583 while (oldgen_scan < oldgen_scan_bd->free
3584 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
3585 oldgen_scan++;
3586 }
3587
3588 if (oldgen_scan < oldgen_scan_bd->free) {
3589
3590 // already scavenged?
3591 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
3592 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
3593 goto loop;
3594 }
3595 push_mark_stack(oldgen_scan);
3596 // ToDo: bump the linear scan by the actual size of the object
3597 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
3598 goto linear_scan;
3599 }
3600
3601 oldgen_scan_bd = oldgen_scan_bd->link;
3602 if (oldgen_scan_bd != NULL) {
3603 oldgen_scan = oldgen_scan_bd->start;
3604 goto loop;
3605 }
3606 }
3607 }
3608
3609 /* -----------------------------------------------------------------------------
3610 Scavenge one object.
3611
3612 This is used for objects that are temporarily marked as mutable
3613 because they contain old-to-new generation pointers. Only certain
3614 objects can have this property.
3615 -------------------------------------------------------------------------- */
3616
3617 static rtsBool
3618 scavenge_one(StgPtr p)
3619 {
3620 const StgInfoTable *info;
3621 nat saved_evac_gen = evac_gen;
3622 rtsBool no_luck;
3623
3624 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3625 info = get_itbl((StgClosure *)p);
3626
3627 switch (info->type) {
3628
3629 case MVAR:
3630 {
3631 StgMVar *mvar = ((StgMVar *)p);
3632 evac_gen = 0;
3633 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
3634 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
3635 mvar->value = evacuate((StgClosure *)mvar->value);
3636 evac_gen = saved_evac_gen;
3637 failed_to_evac = rtsTrue; // mutable.
3638 break;
3639 }
3640
3641 case THUNK:
3642 case THUNK_1_0:
3643 case THUNK_0_1:
3644 case THUNK_1_1:
3645 case THUNK_0_2:
3646 case THUNK_2_0:
3647 {
3648 StgPtr q, end;
3649
3650 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
3651 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
3652 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
3653 }
3654 break;
3655 }
3656
3657 case FUN:
3658 case FUN_1_0: // hardly worth specialising these guys
3659 case FUN_0_1:
3660 case FUN_1_1:
3661 case FUN_0_2:
3662 case FUN_2_0:
3663 case CONSTR:
3664 case CONSTR_1_0:
3665 case CONSTR_0_1:
3666 case CONSTR_1_1:
3667 case CONSTR_0_2:
3668 case CONSTR_2_0:
3669 case WEAK:
3670 case IND_PERM:
3671 {
3672 StgPtr q, end;
3673
3674 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
3675 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
3676 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
3677 }
3678 break;
3679 }
3680
3681 case MUT_VAR_CLEAN:
3682 case MUT_VAR_DIRTY: {
3683 StgPtr q = p;
3684 rtsBool saved_eager_promotion = eager_promotion;
3685
3686 eager_promotion = rtsFalse;
3687 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3688 eager_promotion = saved_eager_promotion;
3689
3690 if (failed_to_evac) {
3691 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
3692 } else {
3693 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
3694 }
3695 break;
3696 }
3697
3698 case CAF_BLACKHOLE:
3699 case SE_CAF_BLACKHOLE:
3700 case SE_BLACKHOLE:
3701 case BLACKHOLE:
3702 break;
3703
3704 case THUNK_SELECTOR:
3705 {
3706 StgSelector *s = (StgSelector *)p;
3707 s->selectee = evacuate(s->selectee);
3708 break;
3709 }
3710
3711 case AP_STACK:
3712 {
3713 StgAP_STACK *ap = (StgAP_STACK *)p;
3714
3715 ap->fun = evacuate(ap->fun);
3716 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
3717 p = (StgPtr)ap->payload + ap->size;
3718 break;
3719 }
3720
3721 case PAP:
3722 p = scavenge_PAP((StgPAP *)p);
3723 break;
3724
3725 case AP:
3726 p = scavenge_AP((StgAP *)p);
3727 break;
3728
3729 case ARR_WORDS:
3730 // nothing to follow
3731 break;
3732
3733 case MUT_ARR_PTRS_CLEAN:
3734 case MUT_ARR_PTRS_DIRTY:
3735 {
3736 StgPtr next, q;
3737 rtsBool saved_eager;
3738
3739 // We don't eagerly promote objects pointed to by a mutable
3740 // array, but if we find the array only points to objects in
3741 // the same or an older generation, we mark it "clean" and
3742 // avoid traversing it during minor GCs.
3743 saved_eager = eager_promotion;
3744 eager_promotion = rtsFalse;
3745 q = p;
3746 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3747 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3748 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3749 }
3750 eager_promotion = saved_eager;
3751
3752 if (failed_to_evac) {
3753 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
3754 } else {
3755 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
3756 }
3757
3758 failed_to_evac = rtsTrue;
3759 break;
3760 }
3761
3762 case MUT_ARR_PTRS_FROZEN:
3763 case MUT_ARR_PTRS_FROZEN0:
3764 {
3765 // follow everything
3766 StgPtr next, q=p;
3767
3768 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3769 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3770 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3771 }
3772
3773 // If we're going to put this object on the mutable list, then
3774 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3775 if (failed_to_evac) {
3776 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3777 } else {
3778 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3779 }
3780 break;
3781 }
3782
3783 case TSO:
3784 {
3785 StgTSO *tso = (StgTSO *)p;
3786 rtsBool saved_eager = eager_promotion;
3787
3788 eager_promotion = rtsFalse;
3789 scavengeTSO(tso);
3790 eager_promotion = saved_eager;
3791
3792 if (failed_to_evac) {
3793 tso->flags |= TSO_DIRTY;
3794 } else {
3795 tso->flags &= ~TSO_DIRTY;
3796 }
3797
3798 failed_to_evac = rtsTrue; // always on the mutable list
3799 break;
3800 }
3801
3802 #if defined(PAR)
3803 case RBH:
3804 {
3805 #if 0
3806 nat size, ptrs, nonptrs, vhs;
3807 char str[80];
3808 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3809 #endif
3810 StgRBH *rbh = (StgRBH *)p;
3811 (StgClosure *)rbh->blocking_queue =
3812 evacuate((StgClosure *)rbh->blocking_queue);
3813 failed_to_evac = rtsTrue; // mutable anyhow.
3814 debugTrace(DEBUG_gc, "scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3815 		   p, info_type(p), (StgClosure *)rbh->blocking_queue);
3816 // ToDo: use size of reverted closure here!
3817 break;
3818 }
3819
3820 case BLOCKED_FETCH:
3821 {
3822 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3823 // follow the pointer to the node which is being demanded
3824 (StgClosure *)bf->node =
3825 evacuate((StgClosure *)bf->node);
3826 // follow the link to the rest of the blocking queue
3827 (StgClosure *)bf->link =
3828 evacuate((StgClosure *)bf->link);
3829 debugTrace(DEBUG_gc,
3830 		   "scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
3831 		   bf, info_type((StgClosure *)bf),
3832 		   bf->node, info_type(bf->node));
3833 break;
3834 }
3835
3836 #ifdef DIST
3837 case REMOTE_REF:
3838 #endif
3839 case FETCH_ME:
3840 break; // nothing to do in this case
3841
3842 case FETCH_ME_BQ:
3843 {
3844 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3845 (StgClosure *)fmbq->blocking_queue =
3846 evacuate((StgClosure *)fmbq->blocking_queue);
      debugTrace(DEBUG_gc, "scavenge: %p (%s) exciting, isn't it",
		 p, info_type((StgClosure *)p));
3849 break;
3850 }
3851 #endif
3852
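    // The STM closures below (TVAR_WAIT_QUEUE, TVAR, TREC_HEADER and
    // TREC_CHUNK) are always mutable, so we scavenge their fields with
    // evac_gen = 0 (no promotion constraint) and set failed_to_evac so
    // that they are put back on the mutable list.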
3853 case TVAR_WAIT_QUEUE:
3854 {
3855 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3856 evac_gen = 0;
3857 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3858 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3859 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3860 evac_gen = saved_evac_gen;
3861 failed_to_evac = rtsTrue; // mutable
3862 break;
3863 }
3864
3865 case TVAR:
3866 {
3867 StgTVar *tvar = ((StgTVar *) p);
3868 evac_gen = 0;
3869 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3870 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3871 evac_gen = saved_evac_gen;
3872 failed_to_evac = rtsTrue; // mutable
3873 break;
3874 }
3875
3876 case TREC_HEADER:
3877 {
3878 StgTRecHeader *trec = ((StgTRecHeader *) p);
3879 evac_gen = 0;
3880 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3881 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3882 evac_gen = saved_evac_gen;
3883 failed_to_evac = rtsTrue; // mutable
3884 break;
3885 }
3886
3887 case TREC_CHUNK:
3888 {
3889 StgWord i;
3890 StgTRecChunk *tc = ((StgTRecChunk *) p);
3891 TRecEntry *e = &(tc -> entries[0]);
3892 evac_gen = 0;
3893 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3894 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3895 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3896 e->expected_value = evacuate((StgClosure*)e->expected_value);
3897 e->new_value = evacuate((StgClosure*)e->new_value);
3898 }
3899 evac_gen = saved_evac_gen;
3900 failed_to_evac = rtsTrue; // mutable
3901 break;
3902 }
3903
3904 case IND_OLDGEN:
3905 case IND_OLDGEN_PERM:
3906 case IND_STATIC:
3907 {
3908 /* Careful here: a THUNK can be on the mutable list because
3909 * it contains pointers to young gen objects. If such a thunk
3910 * is updated, the IND_OLDGEN will be added to the mutable
3911 * list again, and we'll scavenge it twice. evacuate()
3912 * doesn't check whether the object has already been
3913 * evacuated, so we perform that check here.
3914 */
3915 StgClosure *q = ((StgInd *)p)->indirectee;
3916 if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
3917 break;
3918 }
3919 ((StgInd *)p)->indirectee = evacuate(q);
3920 }
3921
3922 #if 0 && defined(DEBUG)
3923 if (RtsFlags.DebugFlags.gc)
3924 /* Debugging code to print out the size of the thing we just
3925 * promoted
3926 */
3927 {
3928 StgPtr start = gen->steps[0].scan;
3929 bdescr *start_bd = gen->steps[0].scan_bd;
3930 nat size = 0;
3931 scavenge(&gen->steps[0]);
3932 if (start_bd != gen->steps[0].scan_bd) {
3933 size += (P_)BLOCK_ROUND_UP(start) - start;
3934 start_bd = start_bd->link;
3935 while (start_bd != gen->steps[0].scan_bd) {
3936 size += BLOCK_SIZE_W;
3937 start_bd = start_bd->link;
3938 }
3939 size += gen->steps[0].scan -
3940 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
3941 } else {
3942 size = gen->steps[0].scan - start;
3943 }
3944 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
3945 }
3946 #endif
3947 break;
3948
3949 default:
3950 barf("scavenge_one: strange object %d", (int)(info->type));
3951 }
3952
3953 no_luck = failed_to_evac;
3954 failed_to_evac = rtsFalse;
3955 return (no_luck);
3956 }
3957
3958 /* -----------------------------------------------------------------------------
3959 Scavenging mutable lists.
3960
   We treat the mutable list of each generation > N (i.e. all the
   generations older than the ones being collected) as roots.  We also
   take the opportunity here to drop objects from the mutable list if
   they no longer point into a younger generation.
3964 -------------------------------------------------------------------------- */
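
/* For orientation only, a sketch (not the code that actually runs; the
 * real call site is in GarbageCollect()) of how the mutable lists are
 * driven: each older generation's mut_list is first set aside in
 * saved_mut_list and replaced with a fresh block, and is later
 * scavenged as a set of roots:
 */
#if 0
    nat g;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
	generations[g].saved_mut_list = generations[g].mut_list;
	generations[g].mut_list = allocBlock();
    }
    // ... evacuate the other roots ...
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
	scavenge_mutable_list(&generations[g]);
    }
#endif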
3965
3966 static void
3967 scavenge_mutable_list(generation *gen)
3968 {
3969 bdescr *bd;
3970 StgPtr p, q;
3971
3972 bd = gen->saved_mut_list;
3973
3974 evac_gen = gen->no;
3975 for (; bd != NULL; bd = bd->link) {
3976 for (q = bd->start; q < bd->free; q++) {
3977 p = (StgPtr)*q;
3978 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3979
3980 #ifdef DEBUG
3981 switch (get_itbl((StgClosure *)p)->type) {
3982 case MUT_VAR_CLEAN:
3983 barf("MUT_VAR_CLEAN on mutable list");
3984 case MUT_VAR_DIRTY:
3985 mutlist_MUTVARS++; break;
3986 case MUT_ARR_PTRS_CLEAN:
3987 case MUT_ARR_PTRS_DIRTY:
3988 case MUT_ARR_PTRS_FROZEN:
3989 case MUT_ARR_PTRS_FROZEN0:
3990 mutlist_MUTARRS++; break;
3991 default:
3992 mutlist_OTHERS++; break;
3993 }
3994 #endif
3995
	    // Check whether this object is "clean", that is, it
	    // definitely doesn't point into a young generation.
3998 // Clean objects don't need to be scavenged. Some clean
3999 // objects (MUT_VAR_CLEAN) are not kept on the mutable
4000 // list at all; others, such as MUT_ARR_PTRS_CLEAN and
4001 // TSO, are always on the mutable list.
4002 //
4003 switch (get_itbl((StgClosure *)p)->type) {
4004 case MUT_ARR_PTRS_CLEAN:
4005 recordMutableGen((StgClosure *)p,gen);
4006 continue;
4007 case TSO: {
4008 StgTSO *tso = (StgTSO *)p;
4009 if ((tso->flags & TSO_DIRTY) == 0) {
4010 // A clean TSO: we don't have to traverse its
4011 // stack. However, we *do* follow the link field:
4012 // we don't want to have to mark a TSO dirty just
4013 // because we put it on a different queue.
4014 if (tso->why_blocked != BlockedOnBlackHole) {
4015 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
4016 }
4017 recordMutableGen((StgClosure *)p,gen);
4018 continue;
4019 }
4020 }
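	    // fall through: a dirty TSO is scavenged by scavenge_one()
	    // below, like any other object on the list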
4021 default:
4022 ;
4023 }
4024
4025 if (scavenge_one(p)) {
4026 // didn't manage to promote everything, so put the
4027 // object back on the list.
4028 recordMutableGen((StgClosure *)p,gen);
4029 }
4030 }
4031 }
4032
4033 // free the old mut_list
4034 freeChain(gen->saved_mut_list);
4035 gen->saved_mut_list = NULL;
4036 }
4037
4038
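/* -----------------------------------------------------------------------------
   Scavenging static objects.

   We scavenge the static_objects list until it is empty, moving each
   object onto scavenged_static_objects as we go.  Evacuation may cons
   new static objects onto the front of static_objects, so we re-read
   the list head after scavenging each object.
   -------------------------------------------------------------------------- */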
4039 static void
4040 scavenge_static(void)
4041 {
4042 StgClosure* p = static_objects;
4043 const StgInfoTable *info;
4044
4045 /* Always evacuate straight to the oldest generation for static
4046 * objects */
4047 evac_gen = oldest_gen->no;
4048
4049 /* keep going until we've scavenged all the objects on the linked
4050 list... */
4051 while (p != END_OF_STATIC_LIST) {
4052
4053 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
4054 info = get_itbl(p);
4055 /*
4056 if (info->type==RBH)
4057 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
4058 */
	// make sure the info pointer is in text space
4060
4061 /* Take this object *off* the static_objects list,
4062 * and put it on the scavenged_static_objects list.
4063 */
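	/* STATIC_LINK(info,p) is the address of p's static link field, so
	 * this unlinks p from static_objects and conses it onto the front
	 * of scavenged_static_objects.
	 */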
4064 static_objects = *STATIC_LINK(info,p);
4065 *STATIC_LINK(info,p) = scavenged_static_objects;
4066 scavenged_static_objects = p;
4067
4068 switch (info -> type) {
4069
4070 case IND_STATIC:
4071 {
4072 StgInd *ind = (StgInd *)p;
4073 ind->indirectee = evacuate(ind->indirectee);
4074
4075 /* might fail to evacuate it, in which case we have to pop it
4076 * back on the mutable list of the oldest generation. We
4077 * leave it *on* the scavenged_static_objects list, though,
4078 * in case we visit this object again.
4079 */
4080 if (failed_to_evac) {
4081 failed_to_evac = rtsFalse;
4082 recordMutableGen((StgClosure *)p,oldest_gen);
4083 }
4084 break;
4085 }
4086
4087 case THUNK_STATIC:
4088 scavenge_thunk_srt(info);
4089 break;
4090
4091 case FUN_STATIC:
4092 scavenge_fun_srt(info);
4093 break;
4094
4095 case CONSTR_STATIC:
4096 {
4097 StgPtr q, next;
4098
4099 next = (P_)p->payload + info->layout.payload.ptrs;
4100 // evacuate the pointers
4101 for (q = (P_)p->payload; q < next; q++) {
4102 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
4103 }
4104 break;
4105 }
4106
4107 default:
4108 barf("scavenge_static: strange closure %d", (int)(info->type));
4109 }
4110
4111 ASSERT(failed_to_evac == rtsFalse);
4112
4113 /* get the next static object from the list. Remember, there might
4114 * be more stuff on this list now that we've done some evacuating!
4115 * (static_objects is a global)
4116 */
4117 p = static_objects;
4118 }
4119 }
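
/* Again for orientation only, a sketch of how scavenge_static() is driven
 * from the main scavenging loop in GarbageCollect(): it is re-run during a
 * major GC whenever evacuation has added new objects to static_objects:
 */
#if 0
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
	scavenge_static();
    }
#endif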
4120
4121 /* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap: a set bit marks a
   non-pointer word, a clear bit marks a pointer to be evacuated
4123 -------------------------------------------------------------------------- */
4124
4125 static void
4126 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
4127 {
4128 nat i, b;
4129 StgWord bitmap;
4130
4131 b = 0;
4132 bitmap = large_bitmap->bitmap[b];