Disable aging when doing deadlock detection GC
ghc.git: rts/sm/Evac.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: evacuation functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * https://gitlab.haskell.org/ghc/ghc/wikis/commentary/rts/storage/gc
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "Evac.h"
18 #include "Storage.h"
19 #include "GC.h"
20 #include "GCThread.h"
21 #include "GCTDecl.h"
22 #include "GCUtils.h"
23 #include "Compact.h"
24 #include "MarkStack.h"
25 #include "Prelude.h"
26 #include "Trace.h"
27 #include "LdvProfile.h"
28 #include "CNF.h"
29 #include "Scav.h"
30 #include "NonMoving.h"
31
32 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
33 #define evacuate(p) evacuate1(p)
34 #define evacuate_BLACKHOLE(p) evacuate_BLACKHOLE1(p)
35 #define HEAP_ALLOCED_GC(p) HEAP_ALLOCED(p)
36 #endif
37
38 #if !defined(PARALLEL_GC) || defined(PROFILING)
39 #define copy_tag_nolock(p, info, src, size, stp, tag) \
40 copy_tag(p, info, src, size, stp, tag)
41 #endif
42
43 /* Note [Selector optimisation depth limit]
44 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
45 *
46 * MAX_THUNK_SELECTOR_DEPTH is used to avoid long recursion of
47 * eval_thunk_selector due to nested selector thunks. Note that this *only*
48 * counts nested selector thunks, e.g. `fst (fst (... (fst x)))`. The collector
49 * will traverse interleaved selector-constructor pairs without limit, e.g.
50 *
51 * a = (fst b, _)
52 * b = (fst c, _)
53 * c = (fst d, _)
54 * d = (x, _)
55 *
56 */
57 #define MAX_THUNK_SELECTOR_DEPTH 16
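/* Editorial sketch (not from the original source), for contrast with the
* example above: a chain that the depth limit *does* bound arises from
* directly nested selectors, e.g.
*
*   x = fst (fst (fst y))
*
* where each selector thunk's selectee is itself a selector thunk. Once
* gct->thunk_selector_depth reaches MAX_THUNK_SELECTOR_DEPTH,
* eval_thunk_selector() stops recursing and bales out, simply copying the
* remaining selector thunk (see the THUNK_SELECTOR case below).
*/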
58
59 static void eval_thunk_selector (StgClosure **q, StgSelector *p, bool);
60 STATIC_INLINE void evacuate_large(StgPtr p);
61
62 /* -----------------------------------------------------------------------------
63 Allocate some space in which to copy an object.
64 -------------------------------------------------------------------------- */
65
66 /* size is in words */
67 STATIC_INLINE StgPtr
68 alloc_for_copy (uint32_t size, uint32_t gen_no)
69 {
70 ASSERT(gen_no < RtsFlags.GcFlags.generations);
71
72 StgPtr to;
73 gen_workspace *ws;
74
75 /* Find out where we're going, using the handy "to" pointer in
76 * the gen of the source object. If it turns out we need to
77 * evacuate to an older generation, adjust it here (see comment
78 * by evacuate()).
79 */
80 if (gen_no < gct->evac_gen_no) {
81 if (gct->eager_promotion) {
82 gen_no = gct->evac_gen_no;
83 } else {
84 gct->failed_to_evac = true;
85 }
86 }
87
88 if (RtsFlags.GcFlags.useNonmoving) {
89 /* See Note [Deadlock detection under nonmoving collector]. */
90 if (deadlock_detect_gc)
91 gen_no = oldest_gen->no;
92
93 if (gen_no == oldest_gen->no) {
94 gct->copied += size;
95 to = nonmovingAllocate(gct->cap, size);
96
97 // Add segment to the todo list unless it's already there
98 // seg->todo_link == NULL means the segment is not yet on the todo list
99 struct NonmovingSegment *seg = nonmovingGetSegment(to);
100 if (!seg->todo_link) {
101 gen_workspace *ws = &gct->gens[oldest_gen->no];
102 seg->todo_link = ws->todo_seg;
103 ws->todo_seg = seg;
104 }
105
106 // The object which refers to this closure may have been aged (i.e.
107 // retained in a younger generation). Consequently, we must add the
108 // closure to the mark queue to ensure that it will be marked.
109 //
110 // However, if we are in a deadlock detection GC then we disable aging
111 // so there is no need.
112 if (major_gc && !deadlock_detect_gc)
113 markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) to);
114 return to;
115 }
116 }
117
118 ws = &gct->gens[gen_no]; // zero memory references here
119
120 /* chain a new block onto the to-space for the destination gen if
121 * necessary.
122 */
123 to = ws->todo_free;
124 ws->todo_free += size;
125 if (ws->todo_free > ws->todo_lim) {
126 to = todo_block_full(size, ws);
127 }
128 ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);
129
130 return to;
131 }
132
133 /* -----------------------------------------------------------------------------
134 The evacuate() code
135 -------------------------------------------------------------------------- */
136
137 /* size is in words */
138 STATIC_INLINE GNUC_ATTR_HOT void
139 copy_tag(StgClosure **p, const StgInfoTable *info,
140 StgClosure *src, uint32_t size, uint32_t gen_no, StgWord tag)
141 {
142 StgPtr to, from;
143 uint32_t i;
144
145 to = alloc_for_copy(size,gen_no);
146
147 from = (StgPtr)src;
148 to[0] = (W_)info;
149 for (i = 1; i < size; i++) { // unroll for small i
150 to[i] = from[i];
151 }
152
153 // if (to+size+2 < bd->start + BLOCK_SIZE_W) {
154 // __builtin_prefetch(to + size + 2, 1);
155 // }
156
157 #if defined(PARALLEL_GC)
158 {
159 const StgInfoTable *new_info;
160 new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
161 if (new_info != info) {
162 #if defined(PROFILING)
163 // We copied this object at the same time as another
164 // thread. We'll evacuate the object again and the copy
165 // we just made will be discarded at the next GC, but we
166 // may have copied it after the other thread called
167 // SET_EVACUAEE_FOR_LDV(), which would confuse the LDV
168 // profiler when it encounters this closure in
169 // processHeapClosureForDead. So we reset the LDVW field
170 // here.
171 LDVW(to) = 0;
172 #endif
173 return evacuate(p); // does the failed_to_evac stuff
174 } else {
175 *p = TAG_CLOSURE(tag,(StgClosure*)to);
176 }
177 }
178 #else
179 src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
180 *p = TAG_CLOSURE(tag,(StgClosure*)to);
181 #endif
182
183 #if defined(PROFILING)
184 // We store the size of the just evacuated object in the LDV word so that
185 // the profiler can guess the position of the next object later.
186 // This is safe only if we are sure that no other thread evacuates
187 // the object again, so we cannot use copy_tag_nolock when PROFILING.
188 SET_EVACUAEE_FOR_LDV(from, size);
189 #endif
190 }
191
192 #if defined(PARALLEL_GC) && !defined(PROFILING)
193 STATIC_INLINE void
194 copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
195 StgClosure *src, uint32_t size, uint32_t gen_no, StgWord tag)
196 {
197 StgPtr to, from;
198 uint32_t i;
199
200 to = alloc_for_copy(size,gen_no);
201
202 from = (StgPtr)src;
203 to[0] = (W_)info;
204 for (i = 1; i < size; i++) { // unroll for small i
205 to[i] = from[i];
206 }
207
208 // if somebody else reads the forwarding pointer, we better make
209 // sure there's a closure at the end of it.
210 write_barrier();
211 *p = TAG_CLOSURE(tag,(StgClosure*)to);
212 src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
213
214 // if (to+size+2 < bd->start + BLOCK_SIZE_W) {
215 // __builtin_prefetch(to + size + 2, 1);
216 // }
217
218 #if defined(PROFILING)
219 // We store the size of the just evacuated object in the LDV word so that
220 // the profiler can guess the position of the next object later.
221 SET_EVACUAEE_FOR_LDV(from, size);
222 #endif
223 }
224 #endif
225
226 /* Special version of copy() for when we only want to copy the info
227 * pointer of an object, but reserve some padding after it. This is
228 * used to optimise evacuation of TSOs.
229 */
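/* Illustrative use, a sketch based on the STACK case in evacuate() below:
* only the fixed StgStack header is copied eagerly, and the caller copies
* the live part of the stack itself once it knows the new Sp.
*
*   if (copyPart(p, (StgClosure *)stack, stack_sizeW(stack),
*                sizeofW(StgStack), gen_no)) {
*       // we won the race: fix up Sp and copy the live frames across
*   }
*/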
230 static bool
231 copyPart(StgClosure **p, StgClosure *src, uint32_t size_to_reserve,
232 uint32_t size_to_copy, uint32_t gen_no)
233 {
234 StgPtr to, from;
235 uint32_t i;
236 StgWord info;
237
238 #if defined(PARALLEL_GC)
239 spin:
240 info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
241 if (info == (W_)&stg_WHITEHOLE_info) {
242 #if defined(PROF_SPIN)
243 whitehole_gc_spin++;
244 #endif
245 busy_wait_nop();
246 goto spin;
247 }
248 if (IS_FORWARDING_PTR(info)) {
249 src->header.info = (const StgInfoTable *)info;
250 evacuate(p); // does the failed_to_evac stuff
251 return false;
252 }
253 #else
254 info = (W_)src->header.info;
255 #endif
256
257 to = alloc_for_copy(size_to_reserve, gen_no);
258
259 from = (StgPtr)src;
260 to[0] = info;
261 for (i = 1; i < size_to_copy; i++) { // unroll for small i
262 to[i] = from[i];
263 }
264
265 write_barrier();
266 src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
267 *p = (StgClosure *)to;
268
269 #if defined(PROFILING)
270 // We store the size of the just evacuated object in the LDV word so that
271 // the profiler can guess the position of the next object later.
272 SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
273 // fill the slop
274 if (size_to_reserve - size_to_copy > 0)
275 LDV_FILL_SLOP(to + size_to_copy, (int)(size_to_reserve - size_to_copy));
276 #endif
277
278 return true;
279 }
280
281
282 /* Copy wrappers that don't tag the closure after copying */
283 STATIC_INLINE GNUC_ATTR_HOT void
284 copy(StgClosure **p, const StgInfoTable *info,
285 StgClosure *src, uint32_t size, uint32_t gen_no)
286 {
287 copy_tag(p,info,src,size,gen_no,0);
288 }
289
290 /* -----------------------------------------------------------------------------
291 Evacuate a large object
292
293 This just consists of removing the object from the (doubly-linked)
294 gen->large_objects list, and linking it on to the (singly-linked)
295 gct->todo_large_objects list, from where it will be scavenged later.
296
297 Convention: bd->flags has BF_EVACUATED set for a large object
298 that has been evacuated, or unset otherwise.
299 -------------------------------------------------------------------------- */
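/* Illustrative check (a sketch of the convention above, as used at the top
* of evacuate_large() and in evacuate() itself): whether a large object has
* already been moved is recorded purely in its block flags, e.g.
*
*   if (Bdescr(p)->flags & BF_EVACUATED) { ... already done ... }
*/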
300
301 STATIC_INLINE void
302 evacuate_large(StgPtr p)
303 {
304 bdescr *bd;
305 generation *gen, *new_gen;
306 uint32_t gen_no, new_gen_no;
307 gen_workspace *ws;
308
309 bd = Bdescr(p);
310 gen = bd->gen;
311 gen_no = bd->gen_no;
312 ACQUIRE_SPIN_LOCK(&gen->sync);
313
314 // already evacuated?
315 if (bd->flags & BF_EVACUATED) {
316 /* Don't forget to set the gct->failed_to_evac flag if we didn't get
317 * the desired destination (see comments in evacuate()).
318 */
319 if (gen_no < gct->evac_gen_no) {
320 gct->failed_to_evac = true;
321 TICK_GC_FAILED_PROMOTION();
322 }
323 RELEASE_SPIN_LOCK(&gen->sync);
324 return;
325 }
326
327 // remove from large_object list
328 dbl_link_remove(bd, &gen->large_objects);
329
330 /* link it on to the evacuated large object list of the destination gen
331 */
332 new_gen_no = bd->dest_no;
333
334 if (deadlock_detect_gc) {
335 /* See Note [Deadlock detection under nonmoving collector]. */
336 new_gen_no = oldest_gen->no;
337 } else if (new_gen_no < gct->evac_gen_no) {
338 if (gct->eager_promotion) {
339 new_gen_no = gct->evac_gen_no;
340 } else {
341 gct->failed_to_evac = true;
342 }
343 }
344
345 ws = &gct->gens[new_gen_no];
346 new_gen = &generations[new_gen_no];
347
348 bd->flags |= BF_EVACUATED;
349 if (RtsFlags.GcFlags.useNonmoving && new_gen == oldest_gen) {
350 bd->flags |= BF_NONMOVING;
351 }
352 initBdescr(bd, new_gen, new_gen->to);
353
354 // If this is a block of pinned or compact objects, we don't have to scan
355 // these objects, because they aren't allowed to contain any outgoing
356 // pointers. For these blocks, we skip the scavenge stage and put
357 // them straight on the scavenged_large_objects list.
358 if (bd->flags & BF_PINNED) {
359 ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS);
360
361 if (new_gen != gen) { ACQUIRE_SPIN_LOCK(&new_gen->sync); }
362 dbl_link_onto(bd, &new_gen->scavenged_large_objects);
363 new_gen->n_scavenged_large_blocks += bd->blocks;
364 if (new_gen != gen) { RELEASE_SPIN_LOCK(&new_gen->sync); }
365 } else {
366 bd->link = ws->todo_large_objects;
367 ws->todo_large_objects = bd;
368 }
369
370 RELEASE_SPIN_LOCK(&gen->sync);
371 }
372
373 /* ----------------------------------------------------------------------------
374 Evacuate static objects
375
376 When a static object is visited for the first time in this GC, it
377 is chained on to the gct->static_objects list.
378
379 evacuate_static_object (link_field, q)
380 - link_field must be STATIC_LINK(q)
381 ------------------------------------------------------------------------- */
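/* For example (taken from the static cases in evacuate() below), callers
* pass the object's own static link field, so that the CAS in the
* THREADED_RTS path operates on the right word:
*
*   evacuate_static_object(THUNK_STATIC_LINK((StgClosure *)q), q);
*   evacuate_static_object(IND_STATIC_LINK((StgClosure *)q), q);
*   evacuate_static_object(STATIC_LINK(info,(StgClosure *)q), q);
*/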
382
383 STATIC_INLINE void
384 evacuate_static_object (StgClosure **link_field, StgClosure *q)
385 {
386 if (RTS_UNLIKELY(RtsFlags.GcFlags.useNonmoving)) {
387 // See Note [Static objects under the nonmoving collector] in Storage.c.
388 if (major_gc && !deadlock_detect_gc)
389 markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
390 return;
391 }
392
393 StgWord link = (StgWord)*link_field;
394
395 // See Note [STATIC_LINK fields] for how the link field bits work
396 if (((link & STATIC_BITS) | prev_static_flag) != 3) {
397 StgWord new_list_head = (StgWord)q | static_flag;
398 #if !defined(THREADED_RTS)
399 *link_field = gct->static_objects;
400 gct->static_objects = (StgClosure *)new_list_head;
401 #else
402 StgWord prev;
403 prev = cas((StgVolatilePtr)link_field, link,
404 (StgWord)gct->static_objects);
405 if (prev == link) {
406 gct->static_objects = (StgClosure *)new_list_head;
407 }
408 #endif
409 }
410 }
411
412 /* ----------------------------------------------------------------------------
413 Evacuate an object inside a CompactNFData
414
415 These are treated in a similar way to large objects. We remove the block
416 from the compact_objects list of the generation it is on, and link it onto
417 the live_compact_objects list of the destination generation.
418
419 It is assumed that objects in the struct always live in the same
420 generation as the struct itself.
421 ------------------------------------------------------------------------- */
422 STATIC_INLINE void
423 evacuate_compact (StgPtr p)
424 {
425 StgCompactNFData *str;
426 bdescr *bd;
427 generation *gen, *new_gen;
428 uint32_t gen_no, new_gen_no;
429
430 // We need to find the Compact# corresponding to this pointer, because it
431 // will give us the first block in the compact chain, which is the one
432 // that gets linked onto the compact_objects list.
433 str = objectGetCompact((StgClosure*)p);
434 ASSERT(get_itbl((StgClosure*)str)->type == COMPACT_NFDATA);
435
436 bd = Bdescr((StgPtr)str);
437 gen_no = bd->gen_no;
438
439 // already evacuated? (we repeat this check below under the lock;
440 // checking here first avoids taking the spin-lock in the common case)
441 if (bd->flags & BF_EVACUATED) {
442 /* Don't forget to set the gct->failed_to_evac flag if we didn't get
443 * the desired destination (see comments in evacuate()).
444 */
445 if (gen_no < gct->evac_gen_no) {
446 gct->failed_to_evac = true;
447 TICK_GC_FAILED_PROMOTION();
448 }
449 return;
450 }
451
452 gen = bd->gen;
453 gen_no = bd->gen_no;
454 ACQUIRE_SPIN_LOCK(&gen->sync);
455
456 // already evacuated?
457 if (bd->flags & BF_EVACUATED) {
458 /* Don't forget to set the gct->failed_to_evac flag if we didn't get
459 * the desired destination (see comments in evacuate()).
460 */
461 if (gen_no < gct->evac_gen_no) {
462 gct->failed_to_evac = true;
463 TICK_GC_FAILED_PROMOTION();
464 }
465 RELEASE_SPIN_LOCK(&gen->sync);
466 return;
467 }
468
469 // remove from compact_objects list
470 dbl_link_remove(bd, &gen->compact_objects);
471
472 /* link it on to the evacuated compact object list of the destination gen
473 */
474 new_gen_no = bd->dest_no;
475
476 if (new_gen_no < gct->evac_gen_no) {
477 if (gct->eager_promotion) {
478 new_gen_no = gct->evac_gen_no;
479 } else {
480 gct->failed_to_evac = true;
481 }
482 }
483
484 new_gen = &generations[new_gen_no];
485
486 // Note: for speed we only update the generation of the first block here.
487 // This means that the bdescrs of subsequent blocks will think they are in
488 // the wrong generation.
489 // (This should not be a problem because there is no code that checks
490 // for that - the only code touching the generation of the block is
491 // in the GC, and that should never see blocks other than the first.)
492 bd->flags |= BF_EVACUATED;
493 initBdescr(bd, new_gen, new_gen->to);
494
495 if (str->hash) {
496 gen_workspace *ws = &gct->gens[new_gen_no];
497 bd->link = ws->todo_large_objects;
498 ws->todo_large_objects = bd;
499 } else {
500 if (new_gen != gen) { ACQUIRE_SPIN_LOCK(&new_gen->sync); }
501 dbl_link_onto(bd, &new_gen->live_compact_objects);
502 new_gen->n_live_compact_blocks += str->totalW / BLOCK_SIZE_W;
503 if (new_gen != gen) { RELEASE_SPIN_LOCK(&new_gen->sync); }
504 }
505
506 RELEASE_SPIN_LOCK(&gen->sync);
507
508 // Note: the object did not move in memory, because it lives
509 // in pinned (BF_COMPACT) allocation, so we do not need to rewrite it
510 // or muck with forwarding pointers.
511 // Also there is no tag to worry about on the struct (tags are used
512 // for constructors and functions, but a struct is neither). There
513 // might be a tag on the object pointer, but again we don't change
514 // the pointer, because we don't move the object, so we don't need to
515 // rewrite the tag.
516 }
517
518 /* ----------------------------------------------------------------------------
519 Evacuate
520
521 This is called (eventually) for every live object in the system.
522
523 The caller to evacuate specifies a desired generation in the
524 gct->evac_gen thread-local variable. The following conditions apply to
525 evacuating an object which resides in generation M when we're
526 collecting up to generation N
527
528 if M >= gct->evac_gen
529 if M > N do nothing
530 else evac to gen->to
531
532 if M < gct->evac_gen evac to gct->evac_gen, step 0
533
534 if the object is already evacuated, then we check which generation
535 it now resides in.
536
537 if M >= gct->evac_gen do nothing
538 if M < gct->evac_gen set gct->failed_to_evac flag to indicate that we
539 didn't manage to evacuate this object into gct->evac_gen.
540
541
542 OPTIMISATION NOTES:
543
544 evacuate() is the single most important function performance-wise
545 in the GC. Various things have been tried to speed it up, but as
546 far as I can tell the code generated by gcc 3.2 with -O2 is about
547 as good as it's going to get. We pass the argument to evacuate()
548 in a register using the 'regparm' attribute (see the prototype for
549 evacuate() near the top of this file).
550
551 Changing evacuate() to take an (StgClosure **) rather than
552 returning the new pointer seems attractive, because we can avoid
553 writing back the pointer when it hasn't changed (eg. for a static
554 object, or an object in a generation > N). However, I tried it and
555 it doesn't help. One reason is that the (StgClosure **) pointer
556 gets spilled to the stack inside evacuate(), resulting in far more
557 extra reads/writes than we save.
558 ------------------------------------------------------------------------- */
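/* The promotion rules above, restated as a rough editorial sketch (the real
* logic is spread across alloc_for_copy(), evacuate_large() and the
* BF_EVACUATED checks below):
*
*   gen_no = Bdescr((P_)q)->dest_no;           // default destination
*   if (gen_no < gct->evac_gen_no) {
*       if (gct->eager_promotion) gen_no = gct->evac_gen_no;
*       else                      gct->failed_to_evac = true;
*   }
*/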
559
560 REGPARM1 GNUC_ATTR_HOT void
561 evacuate(StgClosure **p)
562 {
563 bdescr *bd = NULL;
564 uint32_t gen_no;
565 StgClosure *q;
566 const StgInfoTable *info;
567 StgWord tag;
568
569 q = *p;
570
571 loop:
572 /* The tag and the pointer are split, to be merged after evacing */
573 tag = GET_CLOSURE_TAG(q);
574 q = UNTAG_CLOSURE(q);
575
576 ASSERTM(LOOKS_LIKE_CLOSURE_PTR(q), "invalid closure, info=%p", q->header.info);
577
578 if (!HEAP_ALLOCED_GC(q)) {
579 if (!major_gc) return;
580
581 info = get_itbl(q);
582 switch (info->type) {
583
584 case THUNK_STATIC:
585 if (info->srt != 0) {
586 evacuate_static_object(THUNK_STATIC_LINK((StgClosure *)q), q);
587 }
588 return;
589
590 case FUN_STATIC:
591 if (info->srt != 0 || info->layout.payload.ptrs != 0) {
592 evacuate_static_object(STATIC_LINK(info,(StgClosure *)q), q);
593 }
594 return;
595
596 case IND_STATIC:
597 /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
598 * on the CAF list, so don't do anything with it here (we'll
599 * scavenge it later).
600 */
601 evacuate_static_object(IND_STATIC_LINK((StgClosure *)q), q);
602 return;
603
604 case CONSTR:
605 case CONSTR_1_0:
606 case CONSTR_2_0:
607 case CONSTR_1_1:
608 evacuate_static_object(STATIC_LINK(info,(StgClosure *)q), q);
609 return;
610
611 case CONSTR_0_1:
612 case CONSTR_0_2:
613 case CONSTR_NOCAF:
614 /* no need to put these on the static linked list, they don't need
615 * to be scavenged.
616 */
617 return;
618
619 default:
620 barf("evacuate(static): strange closure type %d", (int)(info->type));
621 }
622 }
623
624 bd = Bdescr((P_)q);
625
626 if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED | BF_COMPACT | BF_NONMOVING)) != 0) {
627 // Pointer to non-moving heap. Non-moving heap is collected using
628 // mark-sweep so this object should be marked and then retained in sweep.
629 if (bd->flags & BF_NONMOVING) {
630 // NOTE: large objects in nonmoving heap are also marked with
631 // BF_NONMOVING. Those are moved to scavenged_large_objects list in
632 // mark phase.
633 if (major_gc && !deadlock_detect_gc)
634 markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
635 return;
636 }
637
638 // pointer into to-space: just return it. It might be a pointer
639 // into a generation that we aren't collecting (> N), or it
640 // might just be a pointer into to-space. The latter doesn't
641 // happen often, but allowing it makes certain things a bit
642 // easier; e.g. scavenging an object is idempotent, so it's OK to
643 // have an object on the mutable list multiple times.
644 if (bd->flags & BF_EVACUATED) {
645 // We aren't copying this object, so we have to check
646 // whether it is already in the target generation. (this is
647 // the write barrier).
648 if (bd->gen_no < gct->evac_gen_no) {
649 gct->failed_to_evac = true;
650 TICK_GC_FAILED_PROMOTION();
651 }
652 return;
653 }
654
655 // Check for compact before checking for large: this allows doing the
656 // right thing for objects that live in the middle of the first
657 // block of a compact (and would otherwise be treated as large objects
658 // even though they are not).
659 if (bd->flags & BF_COMPACT) {
660 evacuate_compact((P_)q);
661
662 // We may have evacuated the block to the nonmoving generation. If so
663 // we need to make sure it is added to the mark queue since the only
664 // reference to it may be from the moving heap.
665 if (major_gc && bd->flags & BF_NONMOVING && !deadlock_detect_gc) {
666 markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
667 }
668 return;
669 }
670
671 /* evacuate large objects by re-linking them onto a different list.
672 */
673 if (bd->flags & BF_LARGE) {
674 evacuate_large((P_)q);
675
676 // We may have evacuated the block to the nonmoving generation. If so
677 // we need to make sure it is added to the mark queue since the only
678 // reference to it may be from the moving heap.
679 if (major_gc && bd->flags & BF_NONMOVING && !deadlock_detect_gc) {
680 markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
681 }
682 return;
683 }
684
685 /* If the object is in a gen that we're compacting, then we
686 * need to use an alternative evacuate procedure.
687 */
688 if (!is_marked((P_)q,bd)) {
689 mark((P_)q,bd);
690 push_mark_stack((P_)q);
691 }
692 return;
693 }
694
695 gen_no = bd->dest_no;
696
697 info = q->header.info;
698 if (IS_FORWARDING_PTR(info))
699 {
700 /* Already evacuated, just return the forwarding address.
701 * HOWEVER: if the requested destination generation (gct->evac_gen) is
702 * older than the actual generation (because the object was
703 * already evacuated to a younger generation) then we have to
704 * set the gct->failed_to_evac flag to indicate that we couldn't
705 * manage to promote the object to the desired generation.
706 */
707 /*
708 * Optimisation: the check is fairly expensive, but we can often
709 * shortcut it if either the required generation is 0, or the
710 * current object (the EVACUATED) is in a high enough generation.
711 * We know that an EVACUATED always points to an object in the
712 * same or an older generation. gen is the lowest generation that the
713 * current object would be evacuated to, so we only do the full
714 * check if gen is too low.
715 */
716 StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
717 *p = TAG_CLOSURE(tag,e);
718 if (gen_no < gct->evac_gen_no) { // optimisation
719 if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
720 gct->failed_to_evac = true;
721 TICK_GC_FAILED_PROMOTION();
722 }
723 }
724 return;
725 }
726
727 switch (INFO_PTR_TO_STRUCT(info)->type) {
728
729 case WHITEHOLE:
730 goto loop;
731
732 // For ints and chars of low value, save space by replacing references to
733 // these closures with references to common, shared ones in the RTS.
734 //
735 // * Except when compiling into Windows DLLs which don't support cross-package
736 // data references very well.
737 //
738 case CONSTR_0_1:
739 {
740 #if defined(COMPILING_WINDOWS_DLL)
741 copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
742 #else
743 StgWord w = (StgWord)q->payload[0];
744 if (info == Czh_con_info &&
745 // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
746 (StgChar)w <= MAX_CHARLIKE) {
747 *p = TAG_CLOSURE(tag,
748 (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
749 );
750 }
751 else if (info == Izh_con_info &&
752 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
753 *p = TAG_CLOSURE(tag,
754 (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
755 );
756 }
757 else {
758 copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
759 }
760 #endif
761 return;
762 }
763
764 case FUN_0_1:
765 case FUN_1_0:
766 case CONSTR_1_0:
767 copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
768 return;
769
770 case THUNK_1_0:
771 case THUNK_0_1:
772 copy(p,info,q,sizeofW(StgThunk)+1,gen_no);
773 return;
774
775 case THUNK_1_1:
776 case THUNK_2_0:
777 case THUNK_0_2:
778 copy(p,info,q,sizeofW(StgThunk)+2,gen_no);
779 return;
780
781 case FUN_1_1:
782 case FUN_2_0:
783 case FUN_0_2:
784 case CONSTR_1_1:
785 case CONSTR_2_0:
786 copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
787 return;
788
789 case CONSTR_0_2:
790 copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
791 return;
792
793 case THUNK:
794 copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
795 return;
796
797 case FUN:
798 case CONSTR:
799 case CONSTR_NOCAF:
800 copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no,tag);
801 return;
802
803 case BLACKHOLE:
804 {
805 StgClosure *r;
806 const StgInfoTable *i;
807 r = ((StgInd*)q)->indirectee;
808 if (GET_CLOSURE_TAG(r) == 0) {
809 i = r->header.info;
810 if (IS_FORWARDING_PTR(i)) {
811 r = (StgClosure *)UN_FORWARDING_PTR(i);
812 i = r->header.info;
813 }
814 if (i == &stg_TSO_info
815 || i == &stg_WHITEHOLE_info
816 || i == &stg_BLOCKING_QUEUE_CLEAN_info
817 || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
818 copy(p,info,q,sizeofW(StgInd),gen_no);
819 return;
820 }
821 // Note [BLACKHOLE pointing to IND]
822 //
823 // BLOCKING_QUEUE can be overwritten by IND (see
824 // wakeBlockingQueue()). However, when this happens we must
825 // be updating the BLACKHOLE, so the BLACKHOLE's indirectee
826 // should now point to the value.
827 //
828 // The mutator might observe an inconsistent state, because
829 // the writes are happening in another thread, so it's
830 // possible for the mutator to follow an indirectee and find
831 // an IND. But this should never happen in the GC, because
832 // the mutators are all stopped and the writes have
833 // completed.
834 ASSERT(i != &stg_IND_info);
835 }
836 q = r;
837 *p = r;
838 goto loop;
839 }
840
841 case MUT_VAR_CLEAN:
842 case MUT_VAR_DIRTY:
843 case MVAR_CLEAN:
844 case MVAR_DIRTY:
845 case TVAR:
846 case BLOCKING_QUEUE:
847 case WEAK:
848 case PRIM:
849 case MUT_PRIM:
850 copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
851 return;
852
853 case BCO:
854 copy(p,info,q,bco_sizeW((StgBCO *)q),gen_no);
855 return;
856
857 case THUNK_SELECTOR:
858 eval_thunk_selector(p, (StgSelector *)q, true);
859 return;
860
861 case IND:
862 // follow chains of indirections, don't evacuate them
863 q = ((StgInd*)q)->indirectee;
864 *p = q;
865 goto loop;
866
867 case RET_BCO:
868 case RET_SMALL:
869 case RET_BIG:
870 case UPDATE_FRAME:
871 case UNDERFLOW_FRAME:
872 case STOP_FRAME:
873 case CATCH_FRAME:
874 case CATCH_STM_FRAME:
875 case CATCH_RETRY_FRAME:
876 case ATOMICALLY_FRAME:
877 // shouldn't see these
878 barf("evacuate: stack frame at %p\n", q);
879
880 case PAP:
881 copy(p,info,q,pap_sizeW((StgPAP*)q),gen_no);
882 return;
883
884 case AP:
885 copy(p,info,q,ap_sizeW((StgAP*)q),gen_no);
886 return;
887
888 case AP_STACK:
889 copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen_no);
890 return;
891
892 case ARR_WORDS:
893 // just copy the block
894 copy(p,info,q,arr_words_sizeW((StgArrBytes *)q),gen_no);
895 return;
896
897 case MUT_ARR_PTRS_CLEAN:
898 case MUT_ARR_PTRS_DIRTY:
899 case MUT_ARR_PTRS_FROZEN_CLEAN:
900 case MUT_ARR_PTRS_FROZEN_DIRTY:
901 // just copy the block
902 copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no);
903 return;
904
905 case SMALL_MUT_ARR_PTRS_CLEAN:
906 case SMALL_MUT_ARR_PTRS_DIRTY:
907 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
908 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
909 // just copy the block
910 copy(p,info,q,small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)q),gen_no);
911 return;
912
913 case TSO:
914 copy(p,info,q,sizeofW(StgTSO),gen_no);
915 return;
916
917 case STACK:
918 {
919 StgStack *stack = (StgStack *)q;
920
921 /* To evacuate a small STACK, we need to adjust the stack pointer
922 */
923 {
924 StgStack *new_stack;
925 StgPtr r, s;
926 bool mine;
927
928 mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
929 sizeofW(StgStack), gen_no);
930 if (mine) {
931 new_stack = (StgStack *)*p;
932 move_STACK(stack, new_stack);
933 for (r = stack->sp, s = new_stack->sp;
934 r < stack->stack + stack->stack_size;) {
935 *s++ = *r++;
936 }
937 }
938 return;
939 }
940 }
941
942 case TREC_CHUNK:
943 copy(p,info,q,sizeofW(StgTRecChunk),gen_no);
944 return;
945
946 default:
947 barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
948 }
949
950 barf("evacuate");
951 }
952
953 /* -----------------------------------------------------------------------------
954 Evacuate a pointer that is guaranteed to point to a BLACKHOLE.
955
956 This is used for evacuating the updatee of an update frame on the stack. We
957 want to copy the blackhole even if it has been updated by another thread and
958 is now an indirection, because the original update frame still needs to
959 update it.
960
961 See also Note [upd-black-hole] in sm/Scav.c.
962 -------------------------------------------------------------------------- */
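/* Illustrative caller (a sketch only; the real call site is the
* UPDATE_FRAME case of scavenge_stack() in sm/Scav.c):
*
*   evacuate_BLACKHOLE(&((StgUpdateFrame *)frame)->updatee);
*/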
963
964 void
965 evacuate_BLACKHOLE(StgClosure **p)
966 {
967 bdescr *bd;
968 uint32_t gen_no;
969 StgClosure *q;
970 const StgInfoTable *info;
971 q = *p;
972
973 // closure is required to be a heap-allocated BLACKHOLE
974 ASSERT(HEAP_ALLOCED_GC(q));
975 ASSERT(GET_CLOSURE_TAG(q) == 0);
976
977 bd = Bdescr((P_)q);
978
979 // blackholes can't be in a compact
980 ASSERT((bd->flags & BF_COMPACT) == 0);
981
982 if (bd->flags & BF_NONMOVING) {
983 if (major_gc && !deadlock_detect_gc)
984 markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
985 return;
986 }
987
988 // blackholes *can* be in a large object: when raiseAsync() creates an
989 // AP_STACK the payload might be large enough to create a large object.
990 // See #14497.
991 if (bd->flags & BF_LARGE) {
992 evacuate_large((P_)q);
993 return;
994 }
995 if (bd->flags & BF_EVACUATED) {
996 if (bd->gen_no < gct->evac_gen_no) {
997 gct->failed_to_evac = true;
998 TICK_GC_FAILED_PROMOTION();
999 }
1000 return;
1001 }
1002 if (bd->flags & BF_MARKED) {
1003 if (!is_marked((P_)q,bd)) {
1004 mark((P_)q,bd);
1005 push_mark_stack((P_)q);
1006 }
1007 return;
1008 }
1009 gen_no = bd->dest_no;
1010 info = q->header.info;
1011 if (IS_FORWARDING_PTR(info))
1012 {
1013 StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
1014 *p = e;
1015 if (gen_no < gct->evac_gen_no) { // optimisation
1016 if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
1017 gct->failed_to_evac = true;
1018 TICK_GC_FAILED_PROMOTION();
1019 }
1020 }
1021 return;
1022 }
1023
1024 ASSERT(INFO_PTR_TO_STRUCT(info)->type == BLACKHOLE);
1025 copy(p,info,q,sizeofW(StgInd),gen_no);
1026 }
1027
1028 /* ----------------------------------------------------------------------------
1029 Update a chain of thunk selectors with the given value. All selectors in the
1030 chain become INDs pointing to the value, except when there is a loop (i.e.
1031 the value of a THUNK_SELECTOR is the THUNK_SELECTOR itself); in that case we
1032 leave the selector as-is.
1033
1034 p is the current selector to update. In eval_thunk_selector we make a list
1035 from selectors using ((StgThunk*)p)->payload[0] for the link field and use
1036 that field to traverse the chain here.
1037
1038 val is the final value of the selector chain.
1039
1040 A chain is formed when we've got something like:
1041
1042 let x = C1 { f1 = e1 }
1043 y = C2 { f2 = f1 x }
1044 z = f2 y
1045
1046 Here the chain (p) we get when evacuating z is:
1047
1048 [ f2 y, f1 x ]
1049
1050 and val is e1.
1051 -------------------------------------------------------------------------- */
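/* Continuing the example above (editorial sketch): with val = e1, after
* unchaining both selector thunks on the chain have been overwritten,
* roughly
*
*   z      ==>  IND e1     (was the selector thunk "f2 y")
*   f1 x   ==>  IND e1     (the inner selector thunk stored in y's f2 field)
*
* Every selector on the chain now points directly at the value, except in
* the self-loop case handled specially below.
*/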
1052
1053 static void
1054 unchain_thunk_selectors(StgSelector *p, StgClosure *val)
1055 {
1056 while (p)
1057 {
1058 ASSERT(p->header.info == &stg_WHITEHOLE_info);
1059 // val must be in to-space. Not always: when we recursively
1060 // invoke eval_thunk_selector(), the recursive calls will not
1061 // evacuate the value (because we want to select on the value,
1062 // not evacuate it), so in this case val is in from-space.
1063 // ASSERT(!HEAP_ALLOCED_GC(val) || Bdescr((P_)val)->gen_no > N || (Bdescr((P_)val)->flags & BF_EVACUATED));
1064
1065 StgSelector *prev = (StgSelector*)((StgClosure *)p)->payload[0];
1066
1067 // Update the THUNK_SELECTOR with an indirection to the
1068 // value. The value is still in from-space at this stage.
1069 //
1070 // (old note: Why not do upd_evacuee(q,p)? Because we have an
1071 // invariant that an EVACUATED closure always points to an
1072 // object in the same or an older generation (required by
1073 // the short-cut test in the EVACUATED case, below).
1074 if ((StgClosure *)p == val) {
1075 // must be a loop; just leave a BLACKHOLE in place. This
1076 // can happen when we have a chain of selectors that
1077 // eventually loops back on itself. We can't leave an
1078 // indirection pointing to itself, and we want the program
1079 // to deadlock if it ever enters this closure, so
1080 // BLACKHOLE is correct.
1081
1082 // XXX we do not have BLACKHOLEs any more; replace with
1083 // a THUNK_SELECTOR again. This will go into a loop if it is
1084 // entered, and should result in a NonTermination exception.
1085 ((StgThunk *)p)->payload[0] = val;
1086 write_barrier();
1087 SET_INFO((StgClosure *)p, &stg_sel_0_upd_info);
1088 } else {
1089 ((StgInd *)p)->indirectee = val;
1090 write_barrier();
1091 SET_INFO((StgClosure *)p, &stg_IND_info);
1092 }
1093
1094 // For the purposes of LDV profiling, we have created an
1095 // indirection.
1096 LDV_RECORD_CREATE(p);
1097
1098 p = prev;
1099 }
1100 }
1101
1102 /* -----------------------------------------------------------------------------
1103 Evaluate a THUNK_SELECTOR if possible.
1104
1105 p points to a THUNK_SELECTOR that we want to evaluate.
1106
1107 If the THUNK_SELECTOR could not be evaluated (its selectee is still a THUNK,
1108 for example), then the THUNK_SELECTOR itself will be evacuated depending on
1109 the evac parameter.
1110 -------------------------------------------------------------------------- */
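/* For orientation, the two ways this is called in this file:
*
*   eval_thunk_selector(p, (StgSelector *)q, true);
*       // from evacuate(): evaluate and then evacuate the result
*
*   eval_thunk_selector(&val, (StgSelector*)selectee, false);
*       // recursive call below: select the value but don't evacuate it
*/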
1111
1112 static void
1113 eval_thunk_selector (StgClosure **q, StgSelector *p, bool evac)
1114 // NB. for legacy reasons, p & q are swapped around :(
1115 {
1116 uint32_t field;
1117 StgInfoTable *info;
1118 StgWord info_ptr;
1119 StgClosure *selectee;
1120 StgSelector *prev_thunk_selector;
1121 bdescr *bd;
1122
1123 prev_thunk_selector = NULL;
1124 // this is a chain of THUNK_SELECTORs that we are going to update
1125 // to point to the value of the current THUNK_SELECTOR. Each
1126 // closure on the chain is a WHITEHOLE, and points to the next in the
1127 // chain with payload[0].
1128
1129 selector_chain:
1130
1131 bd = Bdescr((StgPtr)p);
1132 if (HEAP_ALLOCED_GC(p)) {
1133 // If the THUNK_SELECTOR is in to-space or in a generation that we
1134 // are not collecting, then bale out early. We won't be able to
1135 // save any space in any case, and updating with an indirection is
1136 // trickier in a non-collected gen: we would have to update the
1137 // mutable list.
1138 if ((bd->flags & BF_EVACUATED) || (bd->flags & BF_NONMOVING)) {
1139 unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
1140 *q = (StgClosure *)p;
1141 // shortcut, behave as for: if (evac) evacuate(q);
1142 if (evac && bd->gen_no < gct->evac_gen_no) {
1143 gct->failed_to_evac = true;
1144 TICK_GC_FAILED_PROMOTION();
1145 }
1146 return;
1147 }
1148 // we don't update THUNK_SELECTORS in the compacted
1149 // generation, because compaction does not remove the INDs
1150 // that result; this causes confusion later
1151 // (scavenge_mark_stack doesn't deal with IND). BEWARE! This
1152 // bit is very tricky to get right. If you make changes
1153 // around here, test by compiling stage 3 with +RTS -c -RTS.
1154 if (bd->flags & BF_MARKED) {
1155 // must call evacuate() to mark this closure if evac==true
1156 *q = (StgClosure *)p;
1157 if (evac) evacuate(q);
1158 unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
1159 return;
1160 }
1161 }
1162
1163
1164 // WHITEHOLE the selector thunk, since it is now under evaluation.
1165 // This is important to stop us going into an infinite loop if
1166 // this selector thunk eventually refers to itself.
1167 #if defined(THREADED_RTS)
1168 // In threaded mode, we'll use WHITEHOLE to lock the selector
1169 // thunk while we evaluate it.
1170 {
1171 while(true) {
1172 info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
1173 if (info_ptr != (W_)&stg_WHITEHOLE_info) { break; }
1174 #if defined(PROF_SPIN)
1175 ++whitehole_gc_spin;
1176 #endif
1177 busy_wait_nop();
1178 }
1179
1180 // make sure someone else didn't get here first...
1181 if (IS_FORWARDING_PTR(info_ptr) ||
1182 INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->type != THUNK_SELECTOR) {
1183 // v. tricky now. The THUNK_SELECTOR has been evacuated
1184 // by another thread, and is now either a forwarding ptr or IND.
1185 // We need to extract ourselves from the current situation
1186 // as cleanly as possible.
1187 // - unlock the closure
1188 // - update *q, we may have done *some* evaluation
1189 // - if evac, we need to call evacuate(), because we
1190 // need the write-barrier stuff.
1191 // - undo the chain we've built to point to p.
1192 SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
1193 *q = (StgClosure *)p;
1194 if (evac) evacuate(q);
1195 unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
1196 return;
1197 }
1198 }
1199 #else
1200 // Save the real info pointer (NOTE: not the same as get_itbl()).
1201 info_ptr = (StgWord)p->header.info;
1202 SET_INFO((StgClosure *)p,&stg_WHITEHOLE_info);
1203 #endif
1204
1205 field = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->layout.selector_offset;
1206
1207 // The selectee might be a constructor closure,
1208 // so we untag the pointer.
1209 selectee = UNTAG_CLOSURE(p->selectee);
1210
1211 selector_loop:
1212 // selectee now points to the closure that we're trying to select
1213 // a field from. It may or may not be in to-space: we try not to
1214 // end up in to-space, but it's impractical to avoid it in
1215 // general. The compacting GC scatters to-space pointers in
1216 // from-space during marking, for example. We rely on the property
1217 // that evacuate() doesn't mind if it gets passed a to-space pointer.
1218
1219 info = (StgInfoTable*)selectee->header.info;
1220
1221 if (IS_FORWARDING_PTR(info)) {
1222 // We don't follow pointers into to-space; the constructor
1223 // has already been evacuated, so we won't save any space
1224 // leaks by evaluating this selector thunk anyhow.
1225 goto bale_out;
1226 }
1227
1228 info = INFO_PTR_TO_STRUCT(info);
1229 switch (info->type) {
1230 case WHITEHOLE:
1231 goto bale_out; // about to be evacuated by another thread (or a loop).
1232
1233 case CONSTR:
1234 case CONSTR_1_0:
1235 case CONSTR_0_1:
1236 case CONSTR_2_0:
1237 case CONSTR_1_1:
1238 case CONSTR_0_2:
1239 case CONSTR_NOCAF:
1240 {
1241 // check that the size is in range
1242 ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
1243 info->layout.payload.nptrs));
1244
1245 // Select the right field from the constructor
1246 StgClosure *val = selectee->payload[field];
1247
1248 #if defined(PROFILING)
1249 // For the purposes of LDV profiling, we have destroyed
1250 // the original selector thunk, p.
1251 if (era > 0) {
1252 // Only modify the info pointer when LDV profiling is
1253 // enabled. Note that this is incompatible with parallel GC,
1254 // because it would allow other threads to start evaluating
1255 // the same selector thunk.
1256 SET_INFO((StgClosure*)p, (StgInfoTable *)info_ptr);
1257 OVERWRITING_CLOSURE((StgClosure*)p);
1258 SET_INFO((StgClosure*)p, &stg_WHITEHOLE_info);
1259 }
1260 #endif
1261
1262 // the closure in val is now the "value" of the
1263 // THUNK_SELECTOR in p. However, val may itself be a
1264 // THUNK_SELECTOR, in which case we want to continue
1265 // evaluating until we find the real value, and then
1266 // update the whole chain to point to the value.
1267 val_loop:
1268 info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
1269 if (!IS_FORWARDING_PTR(info_ptr))
1270 {
1271 info = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr);
1272 switch (info->type) {
1273 case IND:
1274 case IND_STATIC:
1275 val = ((StgInd *)val)->indirectee;
1276 goto val_loop;
1277 case THUNK_SELECTOR:
1278 // Use payload to make a list of thunk selectors, to be
1279 // used in unchain_thunk_selectors
1280 ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
1281 prev_thunk_selector = p;
1282 p = (StgSelector*)val;
1283 goto selector_chain;
1284 default:
1285 break;
1286 }
1287 }
1288 ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
1289 prev_thunk_selector = p;
1290
1291 *q = val;
1292
1293 // update the other selectors in the chain *before*
1294 // evacuating the value. This is necessary in the case
1295 // where the value turns out to be one of the selectors
1296 // in the chain (i.e. we have a loop), and evacuating it
1297 // would corrupt the chain.
1298 unchain_thunk_selectors(prev_thunk_selector, val);
1299
1300 // evacuate() cannot recurse through
1301 // eval_thunk_selector(), because we know val is not
1302 // a THUNK_SELECTOR.
1303 if (evac) evacuate(q);
1304 return;
1305 }
1306
1307 case IND:
1308 case IND_STATIC:
1309 // Again, we might need to untag a constructor.
1310 selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
1311 goto selector_loop;
1312
1313 case BLACKHOLE:
1314 {
1315 StgClosure *r;
1316 const StgInfoTable *i;
1317 r = ((StgInd*)selectee)->indirectee;
1318
1319 // establish whether this BH has been updated, and is now an
1320 // indirection, as in evacuate().
1321 if (GET_CLOSURE_TAG(r) == 0) {
1322 i = r->header.info;
1323 if (IS_FORWARDING_PTR(i)) {
1324 r = (StgClosure *)UN_FORWARDING_PTR(i);
1325 i = r->header.info;
1326 }
1327 if (i == &stg_TSO_info
1328 || i == &stg_WHITEHOLE_info
1329 || i == &stg_BLOCKING_QUEUE_CLEAN_info
1330 || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
1331 goto bale_out;
1332 }
1333 ASSERT(i != &stg_IND_info);
1334 }
1335
1336 selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
1337 goto selector_loop;
1338 }
1339
1340 case THUNK_SELECTOR:
1341 {
1342 StgClosure *val;
1343
1344 // recursively evaluate this selector. We don't want to
1345 // recurse indefinitely, so we impose a depth bound.
1346 // See Note [Selector optimisation depth limit].
1347 if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
1348 goto bale_out;
1349 }
1350
1351 gct->thunk_selector_depth++;
1352 // false says "don't evacuate the result". It will,
1353 // however, update any THUNK_SELECTORs that are evaluated
1354 // along the way.
1355 eval_thunk_selector(&val, (StgSelector*)selectee, false);
1356 gct->thunk_selector_depth--;
1357
1358 // did we actually manage to evaluate it?
1359 if (val == selectee) goto bale_out;
1360
1361 // Of course this pointer might be tagged...
1362 selectee = UNTAG_CLOSURE(val);
1363 goto selector_loop;
1364 }
1365
1366 case AP:
1367 case AP_STACK:
1368 case THUNK:
1369 case THUNK_1_0:
1370 case THUNK_0_1:
1371 case THUNK_2_0:
1372 case THUNK_1_1:
1373 case THUNK_0_2:
1374 case THUNK_STATIC:
1375 // not evaluated yet
1376 goto bale_out;
1377
1378 default:
1379 barf("eval_thunk_selector: strange selectee %d",
1380 (int)(info->type));
1381 }
1382
1383 bale_out:
1384 // We didn't manage to evaluate this thunk; restore the old info
1385 // pointer. But don't forget: we still need to evacuate the thunk itself.
1386 SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
1387 // THREADED_RTS: we just unlocked the thunk, so another thread
1388 // might get in and update it. copy() will lock it again and
1389 // check whether it was updated in the meantime.
1390 *q = (StgClosure *)p;
1391 if (evac) {
1392 copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest_no);
1393 }
1394 unchain_thunk_selectors(prev_thunk_selector, *q);
1395 }