1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2008
4 *
5 * Generational garbage collector: evacuation functions
6 *
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
9 *
10 * https://gitlab.haskell.org/ghc/ghc/wikis/commentary/rts/storage/gc
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16
17 #include "Evac.h"
18 #include "Storage.h"
19 #include "GC.h"
20 #include "GCThread.h"
21 #include "GCTDecl.h"
22 #include "GCUtils.h"
23 #include "Compact.h"
24 #include "MarkStack.h"
25 #include "Prelude.h"
26 #include "Trace.h"
27 #include "LdvProfile.h"
28 #include "CNF.h"
29 #include "Scav.h"
30 #include "NonMoving.h"
31
32 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
33 #define evacuate(p) evacuate1(p)
34 #define evacuate_BLACKHOLE(p) evacuate_BLACKHOLE1(p)
35 #define HEAP_ALLOCED_GC(p) HEAP_ALLOCED(p)
36 #endif
37
38 #if !defined(PARALLEL_GC) || defined(PROFILING)
39 #define copy_tag_nolock(p, info, src, size, stp, tag) \
40 copy_tag(p, info, src, size, stp, tag)
41 #endif
42
43 /* Note [Selector optimisation depth limit]
44 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
45 *
46 * MAX_THUNK_SELECTOR_DEPTH is used to avoid long recursion of
47 * eval_thunk_selector due to nested selector thunks. Note that this *only*
48 * counts nested selector thunks, e.g. `fst (fst (... (fst x)))`. The collector
49 * will traverse interleaved selector-constructor pairs without limit, e.g.
50 *
51 * a = (fst b, _)
52 * b = (fst c, _)
53 * c = (fst d, _)
54 * d = (x, _)
55 *
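* When the depth limit is reached, eval_thunk_selector() bales out and the
* selector thunk is simply evacuated unevaluated.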
56 */
57 #define MAX_THUNK_SELECTOR_DEPTH 16
58
59 static void eval_thunk_selector (StgClosure **q, StgSelector *p, bool);
60 STATIC_INLINE void evacuate_large(StgPtr p);
61
62 /* -----------------------------------------------------------------------------
63 Allocate some space in which to copy an object.
64 -------------------------------------------------------------------------- */
65
66 /* size is in words */
67 STATIC_INLINE StgPtr
68 alloc_for_copy (uint32_t size, uint32_t gen_no)
69 {
70 ASSERT(gen_no < RtsFlags.GcFlags.generations);
71
72 StgPtr to;
73 gen_workspace *ws;
74
75 /* Find out where we're going, using the handy "to" pointer in
76 * the gen of the source object. If it turns out we need to
77 * evacuate to an older generation, adjust it here (see comment
78 * by evacuate()).
79 */
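// If the natural destination generation is younger than the one we have
// been asked to evacuate to (gct->evac_gen_no), either promote the object
// eagerly or set failed_to_evac so the caller can compensate (see evacuate()).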
80 if (gen_no < gct->evac_gen_no) {
81 if (gct->eager_promotion) {
82 gen_no = gct->evac_gen_no;
83 } else {
84 gct->failed_to_evac = true;
85 }
86 }
87
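// With the nonmoving collector, objects promoted to the oldest generation
// are allocated directly in the nonmoving heap rather than in a to-space
// block.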
88 if (RtsFlags.GcFlags.useNonmoving && gen_no == oldest_gen->no) {
89 gct->copied += size;
90 to = nonmovingAllocate(gct->cap, size);
91
92         // Add the segment to the todo list unless it's already there:
93         // seg->todo_link == NULL means it is not yet on the todo list.
94 struct NonmovingSegment *seg = nonmovingGetSegment(to);
95 if (!seg->todo_link) {
96 gen_workspace *ws = &gct->gens[oldest_gen->no];
97 seg->todo_link = ws->todo_seg;
98 ws->todo_seg = seg;
99 }
100
101 if (major_gc)
102 markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) to);
103 return to;
104 }
105
106 ws = &gct->gens[gen_no]; // zero memory references here
107
108 /* chain a new block onto the to-space for the destination gen if
109 * necessary.
110 */
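// Bump-allocate from the workspace's current todo block; if this request
// would overrun todo_lim, todo_block_full() closes the block and returns
// space for the object, possibly in a fresh block.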
111 to = ws->todo_free;
112 ws->todo_free += size;
113 if (ws->todo_free > ws->todo_lim) {
114 to = todo_block_full(size, ws);
115 }
116 ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);
117
118 return to;
119 }
120
121 /* -----------------------------------------------------------------------------
122 The evacuate() code
123 -------------------------------------------------------------------------- */
124
125 /* size is in words */
126 STATIC_INLINE GNUC_ATTR_HOT void
127 copy_tag(StgClosure **p, const StgInfoTable *info,
128 StgClosure *src, uint32_t size, uint32_t gen_no, StgWord tag)
129 {
130 StgPtr to, from;
131 uint32_t i;
132
133 to = alloc_for_copy(size,gen_no);
134
135 from = (StgPtr)src;
136 to[0] = (W_)info;
137 for (i = 1; i < size; i++) { // unroll for small i
138 to[i] = from[i];
139 }
140
141 // if (to+size+2 < bd->start + BLOCK_SIZE_W) {
142 // __builtin_prefetch(to + size + 2, 1);
143 // }
144
145 #if defined(PARALLEL_GC)
146 {
147 const StgInfoTable *new_info;
148 new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
149 if (new_info != info) {
150 #if defined(PROFILING)
151 // We copied this object at the same time as another
152 // thread. We'll evacuate the object again and the copy
153 // we just made will be discarded at the next GC, but we
154 // may have copied it after the other thread called
155 // SET_EVACUAEE_FOR_LDV(), which would confuse the LDV
156 // profiler when it encounters this closure in
157 // processHeapClosureForDead. So we reset the LDVW field
158 // here.
159 LDVW(to) = 0;
160 #endif
161 return evacuate(p); // does the failed_to_evac stuff
162 } else {
163 *p = TAG_CLOSURE(tag,(StgClosure*)to);
164 }
165 }
166 #else
167 src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
168 *p = TAG_CLOSURE(tag,(StgClosure*)to);
169 #endif
170
171 #if defined(PROFILING)
172 // We store the size of the just evacuated object in the LDV word so that
173 // the profiler can guess the position of the next object later.
174 // This is safe only if we are sure that no other thread evacuates
175 // the object again, so we cannot use copy_tag_nolock when PROFILING.
176 SET_EVACUAEE_FOR_LDV(from, size);
177 #endif
178 }
179
180 #if defined(PARALLEL_GC) && !defined(PROFILING)
181 STATIC_INLINE void
182 copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
183 StgClosure *src, uint32_t size, uint32_t gen_no, StgWord tag)
184 {
185 StgPtr to, from;
186 uint32_t i;
187
188 to = alloc_for_copy(size,gen_no);
189
190 from = (StgPtr)src;
191 to[0] = (W_)info;
192 for (i = 1; i < size; i++) { // unroll for small i
193 to[i] = from[i];
194 }
195
196     // if somebody else reads the forwarding pointer, we'd better make
197     // sure there's a closure at the end of it.
198 write_barrier();
199 *p = TAG_CLOSURE(tag,(StgClosure*)to);
200 src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
201
202 // if (to+size+2 < bd->start + BLOCK_SIZE_W) {
203 // __builtin_prefetch(to + size + 2, 1);
204 // }
205
206 #if defined(PROFILING)
207 // We store the size of the just evacuated object in the LDV word so that
208 // the profiler can guess the position of the next object later.
209 SET_EVACUAEE_FOR_LDV(from, size);
210 #endif
211 }
212 #endif
213
214 /* Special version of copy() for when we only want to copy the info
215 * pointer of an object, but reserve some padding after it. This is
216 * used to optimise evacuation of TSOs.
217 */
218 static bool
219 copyPart(StgClosure **p, StgClosure *src, uint32_t size_to_reserve,
220 uint32_t size_to_copy, uint32_t gen_no)
221 {
222 StgPtr to, from;
223 uint32_t i;
224 StgWord info;
225
226 #if defined(PARALLEL_GC)
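// Lock the closure by swapping in stg_WHITEHOLE_info: spin if another
// thread holds the lock, and if the closure turns out to be already
// forwarded, restore its header and let evacuate() handle it.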
227 spin:
228 info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
229 if (info == (W_)&stg_WHITEHOLE_info) {
230 #if defined(PROF_SPIN)
231 whitehole_gc_spin++;
232 #endif
233 busy_wait_nop();
234 goto spin;
235 }
236 if (IS_FORWARDING_PTR(info)) {
237 src->header.info = (const StgInfoTable *)info;
238 evacuate(p); // does the failed_to_evac stuff
239 return false;
240 }
241 #else
242 info = (W_)src->header.info;
243 #endif
244
245 to = alloc_for_copy(size_to_reserve, gen_no);
246
247 from = (StgPtr)src;
248 to[0] = info;
249 for (i = 1; i < size_to_copy; i++) { // unroll for small i
250 to[i] = from[i];
251 }
252
253 write_barrier();
254 src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
255 *p = (StgClosure *)to;
256
257 #if defined(PROFILING)
258 // We store the size of the just evacuated object in the LDV word so that
259 // the profiler can guess the position of the next object later.
260 SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
261 // fill the slop
262 if (size_to_reserve - size_to_copy > 0)
263 LDV_FILL_SLOP(to + size_to_copy, (int)(size_to_reserve - size_to_copy));
264 #endif
265
266 return true;
267 }
268
269
270 /* Copy wrappers that don't tag the closure after copying */
271 STATIC_INLINE GNUC_ATTR_HOT void
272 copy(StgClosure **p, const StgInfoTable *info,
273 StgClosure *src, uint32_t size, uint32_t gen_no)
274 {
275 copy_tag(p,info,src,size,gen_no,0);
276 }
277
278 /* -----------------------------------------------------------------------------
279 Evacuate a large object
280
281 This just consists of removing the object from the (doubly-linked)
282 gen->large_objects list, and linking it on to the (singly-linked)
283 gct->todo_large_objects list, from where it will be scavenged later.
284
285 Convention: bd->flags has BF_EVACUATED set for a large object
286 that has been evacuated, or unset otherwise.
287 -------------------------------------------------------------------------- */
288
289 STATIC_INLINE void
290 evacuate_large(StgPtr p)
291 {
292 bdescr *bd;
293 generation *gen, *new_gen;
294 uint32_t gen_no, new_gen_no;
295 gen_workspace *ws;
296
297 bd = Bdescr(p);
298 gen = bd->gen;
299 gen_no = bd->gen_no;
300 ACQUIRE_SPIN_LOCK(&gen->sync);
301
302 // already evacuated?
303 if (bd->flags & BF_EVACUATED) {
304 /* Don't forget to set the gct->failed_to_evac flag if we didn't get
305 * the desired destination (see comments in evacuate()).
306 */
307 if (gen_no < gct->evac_gen_no) {
308 gct->failed_to_evac = true;
309 TICK_GC_FAILED_PROMOTION();
310 }
311 RELEASE_SPIN_LOCK(&gen->sync);
312 return;
313 }
314
315 // remove from large_object list
316 dbl_link_remove(bd, &gen->large_objects);
317
318 /* link it on to the evacuated large object list of the destination gen
319 */
320 new_gen_no = bd->dest_no;
321
322 if (new_gen_no < gct->evac_gen_no) {
323 if (gct->eager_promotion) {
324 new_gen_no = gct->evac_gen_no;
325 } else {
326 gct->failed_to_evac = true;
327 }
328 }
329
330 ws = &gct->gens[new_gen_no];
331 new_gen = &generations[new_gen_no];
332
333 bd->flags |= BF_EVACUATED;
334 if (RtsFlags.GcFlags.useNonmoving && new_gen == oldest_gen) {
335 bd->flags |= BF_NONMOVING;
336 }
337 initBdescr(bd, new_gen, new_gen->to);
338
339 // If this is a block of pinned or compact objects, we don't have to scan
340 // these objects, because they aren't allowed to contain any outgoing
341 // pointers. For these blocks, we skip the scavenge stage and put
342 // them straight on the scavenged_large_objects list.
343 if (bd->flags & BF_PINNED) {
344 ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS);
345
346 if (new_gen != gen) { ACQUIRE_SPIN_LOCK(&new_gen->sync); }
347 dbl_link_onto(bd, &new_gen->scavenged_large_objects);
348 new_gen->n_scavenged_large_blocks += bd->blocks;
349 if (new_gen != gen) { RELEASE_SPIN_LOCK(&new_gen->sync); }
350 } else {
351 bd->link = ws->todo_large_objects;
352 ws->todo_large_objects = bd;
353 }
354
355 RELEASE_SPIN_LOCK(&gen->sync);
356 }
357
358 /* ----------------------------------------------------------------------------
359 Evacuate static objects
360
361 When a static object is visited for the first time in this GC, it
362 is chained on to the gct->static_objects list.
363
364 evacuate_static_object (link_field, q)
365 - link_field must be STATIC_LINK(q)
366 ------------------------------------------------------------------------- */
367
368 STATIC_INLINE void
369 evacuate_static_object (StgClosure **link_field, StgClosure *q)
370 {
371 if (RTS_UNLIKELY(RtsFlags.GcFlags.useNonmoving)) {
372 // See Note [Static objects under the nonmoving collector] in Storage.c.
373 if (major_gc)
374 markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
375 return;
376 }
377
378 StgWord link = (StgWord)*link_field;
379
380 // See Note [STATIC_LINK fields] for how the link field bits work
381 if (((link & STATIC_BITS) | prev_static_flag) != 3) {
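// i.e. this static object has not been visited yet in this GC, so add it
// to the front of gct->static_objects.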
382 StgWord new_list_head = (StgWord)q | static_flag;
383 #if !defined(THREADED_RTS)
384 *link_field = gct->static_objects;
385 gct->static_objects = (StgClosure *)new_list_head;
386 #else
387 StgWord prev;
388 prev = cas((StgVolatilePtr)link_field, link,
389 (StgWord)gct->static_objects);
390 if (prev == link) {
391 gct->static_objects = (StgClosure *)new_list_head;
392 }
393 #endif
394 }
395 }
396
397 /* ----------------------------------------------------------------------------
398 Evacuate an object inside a CompactNFData
399
400 These are treated in a similar way to large objects. We remove the block
401 from the compact_objects list of the generation it is on, and link it onto
402 the live_compact_objects list of the destination generation.
403
404 It is assumed that objects in the struct live in the same generation
405 as the struct itself all the time.
406 ------------------------------------------------------------------------- */
407 STATIC_INLINE void
408 evacuate_compact (StgPtr p)
409 {
410 StgCompactNFData *str;
411 bdescr *bd;
412 generation *gen, *new_gen;
413 uint32_t gen_no, new_gen_no;
414
415     // We need to find the Compact# corresponding to this pointer, because it
416     // will give us the first block in the compact chain, which is the one
417     // that gets linked onto the compact_objects list.
418 str = objectGetCompact((StgClosure*)p);
419 ASSERT(get_itbl((StgClosure*)str)->type == COMPACT_NFDATA);
420
421 bd = Bdescr((StgPtr)str);
422 gen_no = bd->gen_no;
423
424     // already evacuated? (we repeat this check below with the spin-lock
425     // held; doing it here first avoids taking the lock in the common case)
426 if (bd->flags & BF_EVACUATED) {
427 /* Don't forget to set the gct->failed_to_evac flag if we didn't get
428 * the desired destination (see comments in evacuate()).
429 */
430 if (gen_no < gct->evac_gen_no) {
431 gct->failed_to_evac = true;
432 TICK_GC_FAILED_PROMOTION();
433 }
434 return;
435 }
436
437 gen = bd->gen;
438 gen_no = bd->gen_no;
439 ACQUIRE_SPIN_LOCK(&gen->sync);
440
441 // already evacuated?
442 if (bd->flags & BF_EVACUATED) {
443 /* Don't forget to set the gct->failed_to_evac flag if we didn't get
444 * the desired destination (see comments in evacuate()).
445 */
446 if (gen_no < gct->evac_gen_no) {
447 gct->failed_to_evac = true;
448 TICK_GC_FAILED_PROMOTION();
449 }
450 RELEASE_SPIN_LOCK(&gen->sync);
451 return;
452 }
453
454 // remove from compact_objects list
455 dbl_link_remove(bd, &gen->compact_objects);
456
457 /* link it on to the evacuated compact object list of the destination gen
458 */
459 new_gen_no = bd->dest_no;
460
461 if (new_gen_no < gct->evac_gen_no) {
462 if (gct->eager_promotion) {
463 new_gen_no = gct->evac_gen_no;
464 } else {
465 gct->failed_to_evac = true;
466 }
467 }
468
469 new_gen = &generations[new_gen_no];
470
471     // Note: for speed we only update the generation of the first block here.
472     // This means that the bdescrs of subsequent blocks will think they are in
473     // the wrong generation.
474     // (This should not be a problem, because no code checks for that: the only
475     // code touching the generation of a block is in the GC, and it should
476     // never see blocks other than the first.)
477 bd->flags |= BF_EVACUATED;
478 initBdescr(bd, new_gen, new_gen->to);
479
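// If the compact still has a hash table attached, it must be scavenged, so
// queue it like a large object; otherwise it can go straight onto the
// destination generation's live_compact_objects list.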
480 if (str->hash) {
481 gen_workspace *ws = &gct->gens[new_gen_no];
482 bd->link = ws->todo_large_objects;
483 ws->todo_large_objects = bd;
484 } else {
485 if (new_gen != gen) { ACQUIRE_SPIN_LOCK(&new_gen->sync); }
486 dbl_link_onto(bd, &new_gen->live_compact_objects);
487 new_gen->n_live_compact_blocks += str->totalW / BLOCK_SIZE_W;
488 if (new_gen != gen) { RELEASE_SPIN_LOCK(&new_gen->sync); }
489 }
490
491 RELEASE_SPIN_LOCK(&gen->sync);
492
493     // Note: the object did not move in memory, because it lives
494     // in pinned (BF_COMPACT) allocation, so we do not need to rewrite it
495     // or muck with forwarding pointers.
496     // Also there is no tag to worry about on the struct (tags are used
497     // for constructors and functions, but a struct is neither). There
498     // might be a tag on the object pointer, but again we don't change
499     // the pointer, because we don't move the object, so we don't need to
500     // rewrite the tag.
501 }
502
503 /* ----------------------------------------------------------------------------
504 Evacuate
505
506 This is called (eventually) for every live object in the system.
507
508    The caller to evacuate specifies a desired generation in the
509    gct->evac_gen_no thread-local variable. The following conditions apply to
510    evacuating an object which resides in generation M when we're
511    collecting up to generation N:
512
513    if  M >= gct->evac_gen_no
514        if  M > N     do nothing
515        else          evac to gen->to
516
517    if  M < gct->evac_gen_no      evac to gct->evac_gen_no
518
519    if the object is already evacuated, then we check which generation
520    it now resides in.
521
522    if  M >= gct->evac_gen_no     do nothing
523    if  M <  gct->evac_gen_no     set the gct->failed_to_evac flag to indicate
524                                  that we didn't manage to evacuate this object
525                                  into gct->evac_gen_no.
525
526
527 OPTIMISATION NOTES:
528
529 evacuate() is the single most important function performance-wise
530 in the GC. Various things have been tried to speed it up, but as
531 far as I can tell the code generated by gcc 3.2 with -O2 is about
532 as good as it's going to get. We pass the argument to evacuate()
533 in a register using the 'regparm' attribute (see the prototype for
534 evacuate() near the top of this file).
535
536 Changing evacuate() to take an (StgClosure **) rather than
537 returning the new pointer seems attractive, because we can avoid
538 writing back the pointer when it hasn't changed (eg. for a static
539 object, or an object in a generation > N). However, I tried it and
540 it doesn't help. One reason is that the (StgClosure **) pointer
541 gets spilled to the stack inside evacuate(), resulting in far more
542 extra reads/writes than we save.
543 ------------------------------------------------------------------------- */
544
545 REGPARM1 GNUC_ATTR_HOT void
546 evacuate(StgClosure **p)
547 {
548 bdescr *bd = NULL;
549 uint32_t gen_no;
550 StgClosure *q;
551 const StgInfoTable *info;
552 StgWord tag;
553
554 q = *p;
555
556 loop:
557     /* The tag and the pointer are split, to be merged after evacuating */
558 tag = GET_CLOSURE_TAG(q);
559 q = UNTAG_CLOSURE(q);
560
561 ASSERTM(LOOKS_LIKE_CLOSURE_PTR(q), "invalid closure, info=%p", q->header.info);
562
563 if (!HEAP_ALLOCED_GC(q)) {
564 if (!major_gc) return;
565
566 info = get_itbl(q);
567 switch (info->type) {
568
569 case THUNK_STATIC:
570 if (info->srt != 0) {
571 evacuate_static_object(THUNK_STATIC_LINK((StgClosure *)q), q);
572 }
573 return;
574
575 case FUN_STATIC:
576 if (info->srt != 0 || info->layout.payload.ptrs != 0) {
577 evacuate_static_object(STATIC_LINK(info,(StgClosure *)q), q);
578 }
579 return;
580
581 case IND_STATIC:
582 /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
583 * on the CAF list, so don't do anything with it here (we'll
584 * scavenge it later).
585 */
586 evacuate_static_object(IND_STATIC_LINK((StgClosure *)q), q);
587 return;
588
589 case CONSTR:
590 case CONSTR_1_0:
591 case CONSTR_2_0:
592 case CONSTR_1_1:
593 evacuate_static_object(STATIC_LINK(info,(StgClosure *)q), q);
594 return;
595
596 case CONSTR_0_1:
597 case CONSTR_0_2:
598 case CONSTR_NOCAF:
599 /* no need to put these on the static linked list, they don't need
600 * to be scavenged.
601 */
602 return;
603
604 default:
605 barf("evacuate(static): strange closure type %d", (int)(info->type));
606 }
607 }
608
609 bd = Bdescr((P_)q);
610
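// The object is not a candidate for the ordinary copying path: it is
// large, compact, already evacuated, in a compacted (mark-compact)
// generation, or in the nonmoving heap. Each case is handled below.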
611 if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED | BF_COMPACT | BF_NONMOVING)) != 0) {
612 // Pointer to non-moving heap. Non-moving heap is collected using
613 // mark-sweep so this object should be marked and then retained in sweep.
614 if (bd->flags & BF_NONMOVING) {
615 // NOTE: large objects in nonmoving heap are also marked with
616 // BF_NONMOVING. Those are moved to scavenged_large_objects list in
617 // mark phase.
618 if (major_gc)
619 markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
620 return;
621 }
622
623 // pointer into to-space: just return it. It might be a pointer
624 // into a generation that we aren't collecting (> N), or it
625 // might just be a pointer into to-space. The latter doesn't
626 // happen often, but allowing it makes certain things a bit
627 // easier; e.g. scavenging an object is idempotent, so it's OK to
628 // have an object on the mutable list multiple times.
629 if (bd->flags & BF_EVACUATED) {
630 // We aren't copying this object, so we have to check
631 // whether it is already in the target generation. (this is
632 // the write barrier).
633 if (bd->gen_no < gct->evac_gen_no) {
634 gct->failed_to_evac = true;
635 TICK_GC_FAILED_PROMOTION();
636 }
637 return;
638 }
639
640         // Check for compact before checking for large: this ensures we do the
641         // right thing for objects that lie partway into the first block of a
642         // compact (and would otherwise be treated as large objects even though
643         // they are not).
644 if (bd->flags & BF_COMPACT) {
645 evacuate_compact((P_)q);
646
647 // We may have evacuated the block to the nonmoving generation. If so
648 // we need to make sure it is added to the mark queue since the only
649 // reference to it may be from the moving heap.
650 if (major_gc && bd->flags & BF_NONMOVING) {
651 markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
652 }
653 return;
654 }
655
656 /* evacuate large objects by re-linking them onto a different list.
657 */
658 if (bd->flags & BF_LARGE) {
659 evacuate_large((P_)q);
660
661 // We may have evacuated the block to the nonmoving generation. If so
662 // we need to make sure it is added to the mark queue since the only
663 // reference to it may be from the moving heap.
664 if (major_gc && bd->flags & BF_NONMOVING) {
665 markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
666 }
667 return;
668 }
669
670 /* If the object is in a gen that we're compacting, then we
671 * need to use an alternative evacuate procedure.
672 */
673 if (!is_marked((P_)q,bd)) {
674 mark((P_)q,bd);
675 push_mark_stack((P_)q);
676 }
677 return;
678 }
679
680 gen_no = bd->dest_no;
681
682 info = q->header.info;
683 if (IS_FORWARDING_PTR(info))
684 {
685 /* Already evacuated, just return the forwarding address.
686 * HOWEVER: if the requested destination generation (gct->evac_gen) is
687 * older than the actual generation (because the object was
688 * already evacuated to a younger generation) then we have to
689 * set the gct->failed_to_evac flag to indicate that we couldn't
690 * manage to promote the object to the desired generation.
691 */
692 /*
693 * Optimisation: the check is fairly expensive, but we can often
694 * shortcut it if either the required generation is 0, or the
695 * current object (the EVACUATED) is in a high enough generation.
696 * We know that an EVACUATED always points to an object in the
697 * same or an older generation. gen is the lowest generation that the
698 * current object would be evacuated to, so we only do the full
699 * check if gen is too low.
700 */
701 StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
702 *p = TAG_CLOSURE(tag,e);
703 if (gen_no < gct->evac_gen_no) { // optimisation
704 if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
705 gct->failed_to_evac = true;
706 TICK_GC_FAILED_PROMOTION();
707 }
708 }
709 return;
710 }
711
712 switch (INFO_PTR_TO_STRUCT(info)->type) {
713
714 case WHITEHOLE:
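// The closure is temporarily locked by another GC thread (see copyPart()
// and eval_thunk_selector()); loop until its real info pointer reappears.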
715 goto loop;
716
717   // For ints and chars of low value, save space by replacing references to
718   // these with references to common, shared closures in the RTS.
719   //
720   // * Except when compiling into Windows DLLs, which don't support
721   //   cross-package data references very well.
722 //
723 case CONSTR_0_1:
724 {
725 #if defined(COMPILING_WINDOWS_DLL)
726 copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
727 #else
728 StgWord w = (StgWord)q->payload[0];
729 if (info == Czh_con_info &&
730 // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
731 (StgChar)w <= MAX_CHARLIKE) {
732 *p = TAG_CLOSURE(tag,
733 (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
734 );
735 }
736 else if (info == Izh_con_info &&
737 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
738 *p = TAG_CLOSURE(tag,
739 (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
740 );
741 }
742 else {
743 copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
744 }
745 #endif
746 return;
747 }
748
749 case FUN_0_1:
750 case FUN_1_0:
751 case CONSTR_1_0:
752 copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
753 return;
754
755 case THUNK_1_0:
756 case THUNK_0_1:
757 copy(p,info,q,sizeofW(StgThunk)+1,gen_no);
758 return;
759
760 case THUNK_1_1:
761 case THUNK_2_0:
762 case THUNK_0_2:
763 copy(p,info,q,sizeofW(StgThunk)+2,gen_no);
764 return;
765
766 case FUN_1_1:
767 case FUN_2_0:
768 case FUN_0_2:
769 case CONSTR_1_1:
770 case CONSTR_2_0:
771 copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
772 return;
773
774 case CONSTR_0_2:
775 copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
776 return;
777
778 case THUNK:
779 copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
780 return;
781
782 case FUN:
783 case CONSTR:
784 case CONSTR_NOCAF:
785 copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no,tag);
786 return;
787
788 case BLACKHOLE:
789 {
790 StgClosure *r;
791 const StgInfoTable *i;
792 r = ((StgInd*)q)->indirectee;
793 if (GET_CLOSURE_TAG(r) == 0) {
794 i = r->header.info;
795 if (IS_FORWARDING_PTR(i)) {
796 r = (StgClosure *)UN_FORWARDING_PTR(i);
797 i = r->header.info;
798 }
799 if (i == &stg_TSO_info
800 || i == &stg_WHITEHOLE_info
801 || i == &stg_BLOCKING_QUEUE_CLEAN_info
802 || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
803 copy(p,info,q,sizeofW(StgInd),gen_no);
804 return;
805 }
806 // Note [BLACKHOLE pointing to IND]
807 //
808 // BLOCKING_QUEUE can be overwritten by IND (see
809 // wakeBlockingQueue()). However, when this happens we must
810 // be updating the BLACKHOLE, so the BLACKHOLE's indirectee
811 // should now point to the value.
812 //
813 // The mutator might observe an inconsistent state, because
814 // the writes are happening in another thread, so it's
815 // possible for the mutator to follow an indirectee and find
816 // an IND. But this should never happen in the GC, because
817 // the mutators are all stopped and the writes have
818 // completed.
819 ASSERT(i != &stg_IND_info);
820 }
821 q = r;
822 *p = r;
823 goto loop;
824 }
825
826 case MUT_VAR_CLEAN:
827 case MUT_VAR_DIRTY:
828 case MVAR_CLEAN:
829 case MVAR_DIRTY:
830 case TVAR:
831 case BLOCKING_QUEUE:
832 case WEAK:
833 case PRIM:
834 case MUT_PRIM:
835 copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
836 return;
837
838 case BCO:
839 copy(p,info,q,bco_sizeW((StgBCO *)q),gen_no);
840 return;
841
842 case THUNK_SELECTOR:
843 eval_thunk_selector(p, (StgSelector *)q, true);
844 return;
845
846 case IND:
847 // follow chains of indirections, don't evacuate them
848 q = ((StgInd*)q)->indirectee;
849 *p = q;
850 goto loop;
851
852 case RET_BCO:
853 case RET_SMALL:
854 case RET_BIG:
855 case UPDATE_FRAME:
856 case UNDERFLOW_FRAME:
857 case STOP_FRAME:
858 case CATCH_FRAME:
859 case CATCH_STM_FRAME:
860 case CATCH_RETRY_FRAME:
861 case ATOMICALLY_FRAME:
862 // shouldn't see these
863 barf("evacuate: stack frame at %p\n", q);
864
865 case PAP:
866 copy(p,info,q,pap_sizeW((StgPAP*)q),gen_no);
867 return;
868
869 case AP:
870 copy(p,info,q,ap_sizeW((StgAP*)q),gen_no);
871 return;
872
873 case AP_STACK:
874 copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen_no);
875 return;
876
877 case ARR_WORDS:
878 // just copy the block
879 copy(p,info,q,arr_words_sizeW((StgArrBytes *)q),gen_no);
880 return;
881
882 case MUT_ARR_PTRS_CLEAN:
883 case MUT_ARR_PTRS_DIRTY:
884 case MUT_ARR_PTRS_FROZEN_CLEAN:
885 case MUT_ARR_PTRS_FROZEN_DIRTY:
886 // just copy the block
887 copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no);
888 return;
889
890 case SMALL_MUT_ARR_PTRS_CLEAN:
891 case SMALL_MUT_ARR_PTRS_DIRTY:
892 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
893 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
894 // just copy the block
895 copy(p,info,q,small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)q),gen_no);
896 return;
897
898 case TSO:
899 copy(p,info,q,sizeofW(StgTSO),gen_no);
900 return;
901
902 case STACK:
903 {
904 StgStack *stack = (StgStack *)q;
905
906 /* To evacuate a small STACK, we need to adjust the stack pointer
907 */
908 {
909 StgStack *new_stack;
910 StgPtr r, s;
911 bool mine;
912
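// copyPart() copies just the StgStack header and reserves space for the
// whole stack; it returns true if this thread won the race to copy, in
// which case we relocate the live portion of the stack ourselves.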
913 mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
914 sizeofW(StgStack), gen_no);
915 if (mine) {
916 new_stack = (StgStack *)*p;
917 move_STACK(stack, new_stack);
918 for (r = stack->sp, s = new_stack->sp;
919 r < stack->stack + stack->stack_size;) {
920 *s++ = *r++;
921 }
922 }
923 return;
924 }
925 }
926
927 case TREC_CHUNK:
928 copy(p,info,q,sizeofW(StgTRecChunk),gen_no);
929 return;
930
931 default:
932 barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
933 }
934
935 barf("evacuate");
936 }
937
938 /* -----------------------------------------------------------------------------
939 Evacuate a pointer that is guaranteed to point to a BLACKHOLE.
940
941 This is used for evacuating the updatee of an update frame on the stack. We
942 want to copy the blackhole even if it has been updated by another thread and
943 is now an indirection, because the original update frame still needs to
944 update it.
945
946 See also Note [upd-black-hole] in sm/Scav.c.
947 -------------------------------------------------------------------------- */
948
949 void
950 evacuate_BLACKHOLE(StgClosure **p)
951 {
952 bdescr *bd;
953 uint32_t gen_no;
954 StgClosure *q;
955 const StgInfoTable *info;
956 q = *p;
957
958 // closure is required to be a heap-allocated BLACKHOLE
959 ASSERT(HEAP_ALLOCED_GC(q));
960 ASSERT(GET_CLOSURE_TAG(q) == 0);
961
962 bd = Bdescr((P_)q);
963
964 // blackholes can't be in a compact
965 ASSERT((bd->flags & BF_COMPACT) == 0);
966
967 if (bd->flags & BF_NONMOVING) {
968 if (major_gc)
969 markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
970 return;
971 }
972
973 // blackholes *can* be in a large object: when raiseAsync() creates an
974 // AP_STACK the payload might be large enough to create a large object.
975 // See #14497.
976 if (bd->flags & BF_LARGE) {
977 evacuate_large((P_)q);
978 return;
979 }
980 if (bd->flags & BF_EVACUATED) {
981 if (bd->gen_no < gct->evac_gen_no) {
982 gct->failed_to_evac = true;
983 TICK_GC_FAILED_PROMOTION();
984 }
985 return;
986 }
987 if (bd->flags & BF_MARKED) {
988 if (!is_marked((P_)q,bd)) {
989 mark((P_)q,bd);
990 push_mark_stack((P_)q);
991 }
992 return;
993 }
994 gen_no = bd->dest_no;
995 info = q->header.info;
996 if (IS_FORWARDING_PTR(info))
997 {
998 StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
999 *p = e;
1000 if (gen_no < gct->evac_gen_no) { // optimisation
1001 if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
1002 gct->failed_to_evac = true;
1003 TICK_GC_FAILED_PROMOTION();
1004 }
1005 }
1006 return;
1007 }
1008
1009 ASSERT(INFO_PTR_TO_STRUCT(info)->type == BLACKHOLE);
1010 copy(p,info,q,sizeofW(StgInd),gen_no);
1011 }
1012
1013 /* ----------------------------------------------------------------------------
1014    Update a chain of thunk selectors with the given value. All selectors in the
1015    chain become INDs pointing to the value, except when there is a loop (i.e.
1016    the value of a THUNK_SELECTOR is the THUNK_SELECTOR itself), in which case
1017    we leave the selector as-is.
1018
1019 p is the current selector to update. In eval_thunk_selector we make a list
1020 from selectors using ((StgThunk*)p)->payload[0] for the link field and use
1021 that field to traverse the chain here.
1022
1023 val is the final value of the selector chain.
1024
1025 A chain is formed when we've got something like:
1026
1027 let x = C1 { f1 = e1 }
1028 y = C2 { f2 = f1 x }
1029 z = f2 y
1030
1031 Here the chain (p) we get when evacuating z is:
1032
1033 [ f2 y, f1 x ]
1034
1035 and val is e1.
1036 -------------------------------------------------------------------------- */
1037
1038 static void
1039 unchain_thunk_selectors(StgSelector *p, StgClosure *val)
1040 {
1041 while (p)
1042 {
1043 ASSERT(p->header.info == &stg_WHITEHOLE_info);
1044 // val must be in to-space. Not always: when we recursively
1045 // invoke eval_thunk_selector(), the recursive calls will not
1046 // evacuate the value (because we want to select on the value,
1047 // not evacuate it), so in this case val is in from-space.
1048 // ASSERT(!HEAP_ALLOCED_GC(val) || Bdescr((P_)val)->gen_no > N || (Bdescr((P_)val)->flags & BF_EVACUATED));
1049
1050 StgSelector *prev = (StgSelector*)((StgClosure *)p)->payload[0];
1051
1052 // Update the THUNK_SELECTOR with an indirection to the
1053 // value. The value is still in from-space at this stage.
1054 //
1055 // (old note: Why not do upd_evacuee(q,p)? Because we have an
1056 // invariant that an EVACUATED closure always points to an
1057        // object in the same or an older generation, required by
1058        // the short-cut test in the EVACUATED case, below.)
1059 if ((StgClosure *)p == val) {
1060 // must be a loop; just leave a BLACKHOLE in place. This
1061 // can happen when we have a chain of selectors that
1062 // eventually loops back on itself. We can't leave an
1063 // indirection pointing to itself, and we want the program
1064 // to deadlock if it ever enters this closure, so
1065 // BLACKHOLE is correct.
1066
1067 // XXX we do not have BLACKHOLEs any more; replace with
1068 // a THUNK_SELECTOR again. This will go into a loop if it is
1069 // entered, and should result in a NonTermination exception.
1070 ((StgThunk *)p)->payload[0] = val;
1071 write_barrier();
1072 SET_INFO((StgClosure *)p, &stg_sel_0_upd_info);
1073 } else {
1074 ((StgInd *)p)->indirectee = val;
1075 write_barrier();
1076 SET_INFO((StgClosure *)p, &stg_IND_info);
1077 }
1078
1079 // For the purposes of LDV profiling, we have created an
1080 // indirection.
1081 LDV_RECORD_CREATE(p);
1082
1083 p = prev;
1084 }
1085 }
1086
1087 /* -----------------------------------------------------------------------------
1088 Evaluate a THUNK_SELECTOR if possible.
1089
1090 p points to a THUNK_SELECTOR that we want to evaluate.
1091
1092 If the THUNK_SELECTOR could not be evaluated (its selectee is still a THUNK,
1093 for example), then the THUNK_SELECTOR itself will be evacuated depending on
1094 the evac parameter.
1095 -------------------------------------------------------------------------- */
1096
1097 static void
1098 eval_thunk_selector (StgClosure **q, StgSelector *p, bool evac)
1099 // NB. for legacy reasons, p & q are swapped around :(
1100 {
1101 uint32_t field;
1102 StgInfoTable *info;
1103 StgWord info_ptr;
1104 StgClosure *selectee;
1105 StgSelector *prev_thunk_selector;
1106 bdescr *bd;
1107
1108 prev_thunk_selector = NULL;
1109 // this is a chain of THUNK_SELECTORs that we are going to update
1110 // to point to the value of the current THUNK_SELECTOR. Each
1111 // closure on the chain is a WHITEHOLE, and points to the next in the
1112 // chain with payload[0].
1113
1114 selector_chain:
1115
1116 bd = Bdescr((StgPtr)p);
1117 if (HEAP_ALLOCED_GC(p)) {
1118 // If the THUNK_SELECTOR is in to-space or in a generation that we
1119 // are not collecting, then bale out early. We won't be able to
1120 // save any space in any case, and updating with an indirection is
1121 // trickier in a non-collected gen: we would have to update the
1122 // mutable list.
1123 if ((bd->flags & BF_EVACUATED) || (bd->flags & BF_NONMOVING)) {
1124 unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
1125 *q = (StgClosure *)p;
1126 // shortcut, behave as for: if (evac) evacuate(q);
1127 if (evac && bd->gen_no < gct->evac_gen_no) {
1128 gct->failed_to_evac = true;
1129 TICK_GC_FAILED_PROMOTION();
1130 }
1131 return;
1132 }
1133        // We don't update THUNK_SELECTORs in the compacted
1134        // generation, because compaction does not remove the INDs
1135        // that result, and this causes confusion later
1136 // (scavenge_mark_stack doesn't deal with IND). BEWARE! This
1137 // bit is very tricky to get right. If you make changes
1138 // around here, test by compiling stage 3 with +RTS -c -RTS.
1139 if (bd->flags & BF_MARKED) {
1140 // must call evacuate() to mark this closure if evac==true
1141 *q = (StgClosure *)p;
1142 if (evac) evacuate(q);
1143 unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
1144 return;
1145 }
1146 }
1147
1148
1149 // WHITEHOLE the selector thunk, since it is now under evaluation.
1150 // This is important to stop us going into an infinite loop if
1151 // this selector thunk eventually refers to itself.
1152 #if defined(THREADED_RTS)
1153 // In threaded mode, we'll use WHITEHOLE to lock the selector
1154 // thunk while we evaluate it.
1155 {
1156 while(true) {
1157 info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
1158 if (info_ptr != (W_)&stg_WHITEHOLE_info) { break; }
1159 #if defined(PROF_SPIN)
1160 ++whitehole_gc_spin;
1161 #endif
1162 busy_wait_nop();
1163 }
1164
1165 // make sure someone else didn't get here first...
1166 if (IS_FORWARDING_PTR(info_ptr) ||
1167 INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->type != THUNK_SELECTOR) {
1168 // v. tricky now. The THUNK_SELECTOR has been evacuated
1169 // by another thread, and is now either a forwarding ptr or IND.
1170 // We need to extract ourselves from the current situation
1171 // as cleanly as possible.
1172 // - unlock the closure
1173 // - update *q, we may have done *some* evaluation
1174 // - if evac, we need to call evacuate(), because we
1175 // need the write-barrier stuff.
1176 // - undo the chain we've built to point to p.
1177 SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
1178 *q = (StgClosure *)p;
1179 if (evac) evacuate(q);
1180 unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
1181 return;
1182 }
1183 }
1184 #else
1185 // Save the real info pointer (NOTE: not the same as get_itbl()).
1186 info_ptr = (StgWord)p->header.info;
1187 SET_INFO((StgClosure *)p,&stg_WHITEHOLE_info);
1188 #endif
1189
1190 field = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->layout.selector_offset;
1191
1192 // The selectee might be a constructor closure,
1193 // so we untag the pointer.
1194 selectee = UNTAG_CLOSURE(p->selectee);
1195
1196 selector_loop:
1197 // selectee now points to the closure that we're trying to select
1198 // a field from. It may or may not be in to-space: we try not to
1199 // end up in to-space, but it's impractical to avoid it in
1200 // general. The compacting GC scatters to-space pointers in
1201 // from-space during marking, for example. We rely on the property
1202 // that evacuate() doesn't mind if it gets passed a to-space pointer.
1203
1204 info = (StgInfoTable*)selectee->header.info;
1205
1206 if (IS_FORWARDING_PTR(info)) {
1207 // We don't follow pointers into to-space; the constructor
1208        // has already been evacuated, so evaluating this selector
1209        // thunk won't avoid any space leaks anyhow.
1210 goto bale_out;
1211 }
1212
1213 info = INFO_PTR_TO_STRUCT(info);
1214 switch (info->type) {
1215 case WHITEHOLE:
1216 goto bale_out; // about to be evacuated by another thread (or a loop).
1217
1218 case CONSTR:
1219 case CONSTR_1_0:
1220 case CONSTR_0_1:
1221 case CONSTR_2_0:
1222 case CONSTR_1_1:
1223 case CONSTR_0_2:
1224 case CONSTR_NOCAF:
1225 {
1226 // check that the size is in range
1227 ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
1228 info->layout.payload.nptrs));
1229
1230 // Select the right field from the constructor
1231 StgClosure *val = selectee->payload[field];
1232
1233 #if defined(PROFILING)
1234 // For the purposes of LDV profiling, we have destroyed
1235 // the original selector thunk, p.
1236 if (era > 0) {
1237 // Only modify the info pointer when LDV profiling is
1238 // enabled. Note that this is incompatible with parallel GC,
1239 // because it would allow other threads to start evaluating
1240 // the same selector thunk.
1241 SET_INFO((StgClosure*)p, (StgInfoTable *)info_ptr);
1242 OVERWRITING_CLOSURE((StgClosure*)p);
1243 SET_INFO((StgClosure*)p, &stg_WHITEHOLE_info);
1244 }
1245 #endif
1246
1247 // the closure in val is now the "value" of the
1248 // THUNK_SELECTOR in p. However, val may itself be a
1249 // THUNK_SELECTOR, in which case we want to continue
1250 // evaluating until we find the real value, and then
1251 // update the whole chain to point to the value.
1252 val_loop:
1253 info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
1254 if (!IS_FORWARDING_PTR(info_ptr))
1255 {
1256 info = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr);
1257 switch (info->type) {
1258 case IND:
1259 case IND_STATIC:
1260 val = ((StgInd *)val)->indirectee;
1261 goto val_loop;
1262 case THUNK_SELECTOR:
1263 // Use payload to make a list of thunk selectors, to be
1264 // used in unchain_thunk_selectors
1265 ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
1266 prev_thunk_selector = p;
1267 p = (StgSelector*)val;
1268 goto selector_chain;
1269 default:
1270 break;
1271 }
1272 }
1273 ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
1274 prev_thunk_selector = p;
1275
1276 *q = val;
1277
1278 // update the other selectors in the chain *before*
1279 // evacuating the value. This is necessary in the case
1280 // where the value turns out to be one of the selectors
1281 // in the chain (i.e. we have a loop), and evacuating it
1282 // would corrupt the chain.
1283 unchain_thunk_selectors(prev_thunk_selector, val);
1284
1285 // evacuate() cannot recurse through
1286 // eval_thunk_selector(), because we know val is not
1287 // a THUNK_SELECTOR.
1288 if (evac) evacuate(q);
1289 return;
1290 }
1291
1292 case IND:
1293 case IND_STATIC:
1294 // Again, we might need to untag a constructor.
1295 selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
1296 goto selector_loop;
1297
1298 case BLACKHOLE:
1299 {
1300 StgClosure *r;
1301 const StgInfoTable *i;
1302 r = ((StgInd*)selectee)->indirectee;
1303
1304 // establish whether this BH has been updated, and is now an
1305 // indirection, as in evacuate().
1306 if (GET_CLOSURE_TAG(r) == 0) {
1307 i = r->header.info;
1308 if (IS_FORWARDING_PTR(i)) {
1309 r = (StgClosure *)UN_FORWARDING_PTR(i);
1310 i = r->header.info;
1311 }
1312 if (i == &stg_TSO_info
1313 || i == &stg_WHITEHOLE_info
1314 || i == &stg_BLOCKING_QUEUE_CLEAN_info
1315 || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
1316 goto bale_out;
1317 }
1318 ASSERT(i != &stg_IND_info);
1319 }
1320
1321 selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
1322 goto selector_loop;
1323 }
1324
1325 case THUNK_SELECTOR:
1326 {
1327 StgClosure *val;
1328
1329 // recursively evaluate this selector. We don't want to
1330 // recurse indefinitely, so we impose a depth bound.
1331 // See Note [Selector optimisation depth limit].
1332 if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
1333 goto bale_out;
1334 }
1335
1336 gct->thunk_selector_depth++;
1337 // false says "don't evacuate the result". It will,
1338 // however, update any THUNK_SELECTORs that are evaluated
1339 // along the way.
1340 eval_thunk_selector(&val, (StgSelector*)selectee, false);
1341 gct->thunk_selector_depth--;
1342
1343 // did we actually manage to evaluate it?
1344 if (val == selectee) goto bale_out;
1345
1346 // Of course this pointer might be tagged...
1347 selectee = UNTAG_CLOSURE(val);
1348 goto selector_loop;
1349 }
1350
1351 case AP:
1352 case AP_STACK:
1353 case THUNK:
1354 case THUNK_1_0:
1355 case THUNK_0_1:
1356 case THUNK_2_0:
1357 case THUNK_1_1:
1358 case THUNK_0_2:
1359 case THUNK_STATIC:
1360 // not evaluated yet
1361 goto bale_out;
1362
1363 default:
1364 barf("eval_thunk_selector: strange selectee %d",
1365 (int)(info->type));
1366 }
1367
1368 bale_out:
1369 // We didn't manage to evaluate this thunk; restore the old info
1370 // pointer. But don't forget: we still need to evacuate the thunk itself.
1371 SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
1372 // THREADED_RTS: we just unlocked the thunk, so another thread
1373 // might get in and update it. copy() will lock it again and
1374 // check whether it was updated in the meantime.
1375 *q = (StgClosure *)p;
1376 if (evac) {
1377 copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest_no);
1378 }
1379 unchain_thunk_selectors(prev_thunk_selector, *q);
1380 }