/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: evacuation functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "Storage.h"
#include "MBlock.h"
#include "Evac.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "Compact.h"
#include "Prelude.h"
#include "LdvProfile.h"
#include "Trace.h"

#if defined(PROF_SPIN) && defined(THREADED_RTS) && defined(PARALLEL_GC)
StgWord64 whitehole_spin = 0;
#endif

#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
#define evacuate(p) evacuate1(p)
#endif

#if !defined(PARALLEL_GC)
#define copy_tag_nolock(p, info, src, size, stp, tag) \
        copy_tag(p, info, src, size, stp, tag)
#endif

/* Used to avoid long recursion due to selector thunks
 */
#define MAX_THUNK_SELECTOR_DEPTH 16
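// If a chain of selector thunks exceeds this depth, eval_thunk_selector()
// gives up (see the bale_out label below): the THUNK_SELECTOR is evacuated
// unevaluated, and may still be reduced later, either by the mutator or by
// a subsequent GC.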

static void eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool);
STATIC_INLINE void evacuate_large(StgPtr p);

/* -----------------------------------------------------------------------------
   Allocate some space in which to copy an object.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
alloc_for_copy (nat size, step *stp)
{
    StgPtr to;
    step_workspace *ws;

    /* Find out where we're going, using the handy "to" pointer in
     * the step of the source object.  If it turns out we need to
     * evacuate to an older generation, adjust it here (see comment
     * by evacuate()).
     */
    if (stp < gct->evac_step) {
        if (gct->eager_promotion) {
            stp = gct->evac_step;
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->steps[stp->abs_no];
    // this compiles to a single mem access to stp->abs_no only

    /* chain a new block onto the to-space for the destination step if
     * necessary.
     */
    to = ws->todo_free;
    if (to + size > ws->todo_lim) {
        to = todo_block_full(size, ws);
    }
    ws->todo_free = to + size;
    ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);

    return to;
}
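
/* Note: allocation in alloc_for_copy() is lock-free.  Each GC thread (gct)
 * has a private step_workspace per step, so todo_free and todo_lim above are
 * only ever touched by this thread; roughly speaking, todo_block_full()
 * retires the current todo block and sets up a fresh one, returning a pointer
 * with at least 'size' words of room.
 */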

/* -----------------------------------------------------------------------------
   The evacuate() code
   -------------------------------------------------------------------------- */

STATIC_INLINE GNUC_ATTR_HOT void
copy_tag(StgClosure **p, const StgInfoTable *info,
         StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,stp);

    TICK_GC_WORDS_COPIED(size);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

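    // In the parallel GC we publish the new copy by CAS-ing a forwarding
    // pointer into the source object's info word.  A forwarding pointer is
    // just the to-space address with its low bit set (MK_FORWARDING_PTR), so
    // it can never be mistaken for a real (word-aligned) info pointer.  If
    // the CAS fails, another GC thread got here first: we abandon our copy
    // (wasting a little to-space, which is harmless) and retry via
    // evacuate(), which will now find that thread's forwarding pointer.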
#if defined(PARALLEL_GC)
    {
        const StgInfoTable *new_info;
        new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
        if (new_info != info) {
            return evacuate(p); // does the failed_to_evac stuff
        } else {
            *p = TAG_CLOSURE(tag,(StgClosure*)to);
        }
    }
#else
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}

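/* copy_tag_nolock() is copy_tag() without the CAS.  evacuate() uses it only
 * for immutable closures (constructors, functions, permanent indirections):
 * if two GC threads race on such an object, the worst that should happen is
 * that it gets copied twice, and the two copies are interchangeable, so we
 * trade a little wasted space for avoiding an atomic operation per object.
 */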
#if defined(PARALLEL_GC)
STATIC_INLINE void
copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
         StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,stp);
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);

    TICK_GC_WORDS_COPIED(size);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
#endif

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
static void
copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
    StgPtr to, from;
    nat i;
    StgWord info;

#if defined(PARALLEL_GC)
spin:
    info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_WHITEHOLE_info) {
#ifdef PROF_SPIN
        whitehole_spin++;
#endif
        goto spin;
    }
    if (IS_FORWARDING_PTR(info)) {
        src->header.info = (const StgInfoTable *)info;
        evacuate(p); // does the failed_to_evac stuff
        return;
    }
#else
    info = (W_)src->header.info;
#endif

    to = alloc_for_copy(size_to_reserve, stp);
    *p = (StgClosure *)to;

    TICK_GC_WORDS_COPIED(size_to_copy);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }

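    // The copy above must be complete before other GC threads can see the
    // forwarding pointer we are about to install, hence the write barrier:
    // without it, a thread reading the forwarding pointer could follow it to
    // a partially-written object.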
#if defined(PARALLEL_GC)
    write_barrier();
#endif
    src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
#endif
}


/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE GNUC_ATTR_HOT void
copy(StgClosure **p, const StgInfoTable *info,
     StgClosure *src, nat size, step *stp)
{
    copy_tag(p,info,src,size,stp,0);
}

/* ----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   gct->evac_step thread-local variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= gct->evac_step
        if  M > N     do nothing
        else          evac to step->to

   if  M < gct->evac_step      evac to gct->evac_step, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= gct->evac_step     do nothing
   if  M <  gct->evac_step     set gct->failed_to_evac flag to indicate that
                               we didn't manage to evacuate this object into
                               gct->evac_step.


   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   ------------------------------------------------------------------------- */
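
/* A note on pointer tags: the low bits of a closure pointer may carry a
 * constructor tag or a function arity (pointer tagging).  evacuate() strips
 * the tag with UNTAG_CLOSURE() before looking at the closure, and re-applies
 * it with TAG_CLOSURE() when writing the new address back through p, so tags
 * are preserved across evacuation.  For example, an evacuated pointer to a
 * (:) cell keeps its tag bits, and the mutator can still avoid entering the
 * closure to discover which constructor it is.
 */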

REGPARM1 GNUC_ATTR_HOT void
evacuate(StgClosure **p)
{
    bdescr *bd = NULL;
    step *stp;
    StgClosure *q;
    const StgInfoTable *info;
    StgWord tag;

    q = *p;

loop:
    /* The tag and the pointer are split, to be merged after evacing */
    tag = GET_CLOSURE_TAG(q);
    q = UNTAG_CLOSURE(q);

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    if (!HEAP_ALLOCED(q)) {

        if (!major_gc) return;

        info = get_itbl(q);
        switch (info->type) {

        case THUNK_STATIC:
            if (info->srt_bitmap != 0) {
                if (*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
                    *THUNK_STATIC_LINK((StgClosure *)q) = gct->static_objects;
                    gct->static_objects = (StgClosure *)q;
#else
                    StgPtr link;
                    link = (StgPtr)cas((StgPtr)THUNK_STATIC_LINK((StgClosure *)q),
                                       (StgWord)NULL,
                                       (StgWord)gct->static_objects);
                    if (link == NULL) {
                        gct->static_objects = (StgClosure *)q;
                    }
#endif
                }
            }
            return;

        case FUN_STATIC:
            if (info->srt_bitmap != 0 &&
                *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
                *FUN_STATIC_LINK((StgClosure *)q) = gct->static_objects;
                gct->static_objects = (StgClosure *)q;
#else
                StgPtr link;
                link = (StgPtr)cas((StgPtr)FUN_STATIC_LINK((StgClosure *)q),
                                   (StgWord)NULL,
                                   (StgWord)gct->static_objects);
                if (link == NULL) {
                    gct->static_objects = (StgClosure *)q;
                }
#endif
            }
            return;

        case IND_STATIC:
            /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
             * on the CAF list, so don't do anything with it here (we'll
             * scavenge it later).
             */
            if (((StgIndStatic *)q)->saved_info == NULL) {
                if (*IND_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
                    *IND_STATIC_LINK((StgClosure *)q) = gct->static_objects;
                    gct->static_objects = (StgClosure *)q;
#else
                    StgPtr link;
                    link = (StgPtr)cas((StgPtr)IND_STATIC_LINK((StgClosure *)q),
                                       (StgWord)NULL,
                                       (StgWord)gct->static_objects);
                    if (link == NULL) {
                        gct->static_objects = (StgClosure *)q;
                    }
#endif
                }
            }
            return;

        case CONSTR_STATIC:
            if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
                *STATIC_LINK(info,(StgClosure *)q) = gct->static_objects;
                gct->static_objects = (StgClosure *)q;
#else
                StgPtr link;
                link = (StgPtr)cas((StgPtr)STATIC_LINK(info,(StgClosure *)q),
                                   (StgWord)NULL,
                                   (StgWord)gct->static_objects);
                if (link == NULL) {
                    gct->static_objects = (StgClosure *)q;
                }
#endif
            }
            /* I am assuming that static_objects pointers are not
             * written to other objects, and thus, no need to retag. */
            return;

        case CONSTR_NOCAF_STATIC:
            /* no need to put these on the static linked list, they don't need
             * to be scavenged.
             */
            return;

        default:
            barf("evacuate(static): strange closure type %d", (int)(info->type));
        }
    }

    bd = Bdescr((P_)q);

    if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {

        // pointer into to-space: just return it.  It might be a pointer
        // into a generation that we aren't collecting (> N), or it
        // might just be a pointer into to-space.  The latter doesn't
        // happen often, but allowing it makes certain things a bit
        // easier; e.g. scavenging an object is idempotent, so it's OK to
        // have an object on the mutable list multiple times.
        if (bd->flags & BF_EVACUATED) {
            // We aren't copying this object, so we have to check
            // whether it is already in the target generation.  (this is
            // the write barrier).
            if (bd->step < gct->evac_step) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
            return;
        }

        /* evacuate large objects by re-linking them onto a different list.
         */
        if (bd->flags & BF_LARGE) {
            info = get_itbl(q);
            if (info->type == TSO &&
                ((StgTSO *)q)->what_next == ThreadRelocated) {
                q = (StgClosure *)((StgTSO *)q)->_link;
                *p = q;
                goto loop;
            }
            evacuate_large((P_)q);
            return;
        }

        /* If the object is in a step that we're compacting, then we
         * need to use an alternative evacuate procedure.
         */
        if (!is_marked((P_)q,bd)) {
            mark((P_)q,bd);
            if (mark_stack_full()) {
                debugTrace(DEBUG_gc,"mark stack overflowed");
                mark_stack_overflowed = rtsTrue;
                reset_mark_stack();
            }
            push_mark_stack((P_)q);
        }
        return;
    }

    stp = bd->step->to;

    info = q->header.info;
    if (IS_FORWARDING_PTR(info))
    {
        /* Already evacuated, just return the forwarding address.
         * HOWEVER: if the requested destination generation (gct->evac_step) is
         * older than the actual generation (because the object was
         * already evacuated to a younger generation) then we have to
         * set the gct->failed_to_evac flag to indicate that we couldn't
         * manage to promote the object to the desired generation.
         */
        /*
         * Optimisation: the check is fairly expensive, but we can often
         * shortcut it if either the required generation is 0, or the
         * current object (the EVACUATED) is in a high enough generation.
         * We know that an EVACUATED always points to an object in the
         * same or an older generation.  stp is the lowest step that the
         * current object would be evacuated to, so we only do the full
         * check if stp is too low.
         */
        StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
        *p = TAG_CLOSURE(tag,e);
        if (stp < gct->evac_step) {  // optimisation
            if (Bdescr((P_)e)->step < gct->evac_step) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
        }
        return;
    }

    switch (INFO_PTR_TO_STRUCT(info)->type) {

    case WHITEHOLE:
        goto loop;

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case MVAR_CLEAN:
    case MVAR_DIRTY:
        copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp);
        return;

    case CONSTR_0_1:
    {
        StgWord w = (StgWord)q->payload[0];
        if (info == Czh_con_info &&
            // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
            (StgChar)w <= MAX_CHARLIKE) {
            *p = TAG_CLOSURE(tag,
                             (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                            );
        }
        else if (info == Izh_con_info &&
                 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
            *p = TAG_CLOSURE(tag,
                             (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                            );
        }
        else {
            copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,stp,tag);
        }
        return;
    }

    case FUN_0_1:
    case FUN_1_0:
    case CONSTR_1_0:
        copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,stp,tag);
        return;

    case THUNK_1_0:
    case THUNK_0_1:
        copy(p,info,q,sizeofW(StgThunk)+1,stp);
        return;

    case THUNK_1_1:
    case THUNK_2_0:
    case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
        if (bd->gen_no == 0 &&
            bd->step->no != 0 &&
            bd->step->no == generations[bd->gen_no].n_steps-1) {
            stp = bd->step;
        }
#endif
        copy(p,info,q,sizeofW(StgThunk)+2,stp);
        return;

    case FUN_1_1:
    case FUN_2_0:
    case FUN_0_2:
    case CONSTR_1_1:
    case CONSTR_2_0:
        copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,stp,tag);
        return;

    case CONSTR_0_2:
        copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,stp,tag);
        return;

    case THUNK:
        copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp);
        return;

    case FUN:
    case IND_PERM:
    case IND_OLDGEN_PERM:
    case CONSTR:
        copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp,tag);
        return;

    case WEAK:
    case STABLE_NAME:
        copy_tag(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp,tag);
        return;

    case BCO:
        copy(p,info,q,bco_sizeW((StgBCO *)q),stp);
        return;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
        return;

    case THUNK_SELECTOR:
        eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
        return;

    case IND:
    case IND_OLDGEN:
        // follow chains of indirections, don't evacuate them
        q = ((StgInd*)q)->indirectee;
        *p = q;
        goto loop;

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_RETRY_FRAME:
    case ATOMICALLY_FRAME:
        // shouldn't see these
        barf("evacuate: stack frame at %p\n", q);

    case PAP:
        copy(p,info,q,pap_sizeW((StgPAP*)q),stp);
        return;

    case AP:
        copy(p,info,q,ap_sizeW((StgAP*)q),stp);
        return;

    case AP_STACK:
        copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),stp);
        return;

    case ARR_WORDS:
        // just copy the block
        copy(p,info,q,arr_words_sizeW((StgArrWords *)q),stp);
        return;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // just copy the block
        copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
        return;

    case TSO:
    {
        StgTSO *tso = (StgTSO *)q;

        /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
         */
        if (tso->what_next == ThreadRelocated) {
            q = (StgClosure *)tso->_link;
            *p = q;
            goto loop;
        }

        /* To evacuate a small TSO, we need to relocate the update frame
         * list it contains.
         */
        {
            StgTSO *new_tso;
            StgPtr r, s;

            copyPart(p,(StgClosure *)tso, tso_sizeW(tso), sizeofW(StgTSO), stp);
            new_tso = (StgTSO *)*p;
            move_TSO(tso, new_tso);
            for (r = tso->sp, s = new_tso->sp;
                 r < tso->stack+tso->stack_size;) {
                *s++ = *r++;
            }
            return;
        }
    }

    case TREC_HEADER:
        copy(p,info,q,sizeofW(StgTRecHeader),stp);
        return;

    case TVAR_WATCH_QUEUE:
        copy(p,info,q,sizeofW(StgTVarWatchQueue),stp);
        return;

    case TVAR:
        copy(p,info,q,sizeofW(StgTVar),stp);
        return;

    case TREC_CHUNK:
        copy(p,info,q,sizeofW(StgTRecChunk),stp);
        return;

    case ATOMIC_INVARIANT:
        copy(p,info,q,sizeofW(StgAtomicInvariant),stp);
        return;

    case INVARIANT_CHECK_QUEUE:
        copy(p,info,q,sizeofW(StgInvariantCheckQueue),stp);
        return;

    default:
        barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
    }

    barf("evacuate");
}

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   step->new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */

STATIC_INLINE void
evacuate_large(StgPtr p)
{
    bdescr *bd = Bdescr(p);
    step *stp, *new_stp;
    step_workspace *ws;

    stp = bd->step;
    ACQUIRE_SPIN_LOCK(&stp->sync_large_objects);

    // object must be at the beginning of the block (or be a ByteArray)
    ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
           (((W_)p & BLOCK_MASK) == 0));

    // already evacuated?
    if (bd->flags & BF_EVACUATED) {
        /* Don't forget to set the gct->failed_to_evac flag if we didn't get
         * the desired destination (see comments in evacuate()).
         */
        if (stp < gct->evac_step) {
            gct->failed_to_evac = rtsTrue;
            TICK_GC_FAILED_PROMOTION();
        }
        RELEASE_SPIN_LOCK(&stp->sync_large_objects);
        return;
    }

    // remove from large_object list
    if (bd->u.back) {
        bd->u.back->link = bd->link;
    } else { // first object in the list
        stp->large_objects = bd->link;
    }
    if (bd->link) {
        bd->link->u.back = bd->u.back;
    }

    /* link it on to the evacuated large object list of the destination step
     */
    new_stp = stp->to;
    if (new_stp < gct->evac_step) {
        if (gct->eager_promotion) {
            new_stp = gct->evac_step;
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->steps[new_stp->abs_no];
    bd->flags |= BF_EVACUATED;
    bd->step = new_stp;
    bd->gen_no = new_stp->gen_no;
    bd->link = ws->todo_large_objects;
    ws->todo_large_objects = bd;

    RELEASE_SPIN_LOCK(&stp->sync_large_objects);
}
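
/* Note: stp->sync_large_objects is a per-step spin lock.  It serialises GC
 * threads that race to evacuate large objects in the same step, so the
 * BF_EVACUATED test and the unlinking from the large_objects list above
 * happen atomically with respect to other GC threads.
 */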

/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   p points to a THUNK_SELECTOR that we want to evaluate.  The
   result of "evaluating" it will be evacuated and a pointer to the
   to-space closure will be returned.

   If the THUNK_SELECTOR could not be evaluated (its selectee is still
   a THUNK, for example), then the THUNK_SELECTOR itself will be
   evacuated.
   -------------------------------------------------------------------------- */
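
/* For example (illustrative): the thunk built for a projection such as
 * `fst p` is a THUNK_SELECTOR with selector_offset 0 whose selectee is p.
 * If p has already been evaluated to a pair constructor, the GC can replace
 * the whole thunk by the pair's first field, removing the space leak of
 * keeping the unused second field alive.
 */
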
static void
unchain_thunk_selectors(StgSelector *p, StgClosure *val)
{
    StgSelector *prev;

    prev = NULL;
    while (p)
    {
#ifdef THREADED_RTS
        ASSERT(p->header.info == &stg_WHITEHOLE_info);
#else
        ASSERT(p->header.info == &stg_BLACKHOLE_info);
#endif
        // val must be in to-space.  Not always: when we recursively
        // invoke eval_thunk_selector(), the recursive calls will not
        // evacuate the value (because we want to select on the value,
        // not evacuate it), so in this case val is in from-space.
        // ASSERT(!HEAP_ALLOCED(val) || Bdescr((P_)val)->gen_no > N || (Bdescr((P_)val)->flags & BF_EVACUATED));

        prev = (StgSelector*)((StgClosure *)p)->payload[0];

        // Update the THUNK_SELECTOR with an indirection to the
        // EVACUATED closure now at p.  Why do this rather than
        // upd_evacuee(q,p)?  Because we have an invariant that an
        // EVACUATED closure always points to an object in the
        // same or an older generation (required by the short-cut
        // test in the EVACUATED case, below).
        ((StgInd *)p)->indirectee = val;
        write_barrier();
        SET_INFO(p, &stg_IND_info);

        // For the purposes of LDV profiling, we have created an
        // indirection.
        LDV_RECORD_CREATE(p);

        p = prev;
    }
}

static void
eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
                 // NB. for legacy reasons, p & q are swapped around :(
{
    nat field;
    StgInfoTable *info;
    StgWord info_ptr;
    StgClosure *selectee;
    StgSelector *prev_thunk_selector;
    bdescr *bd;
    StgClosure *val;

    prev_thunk_selector = NULL;
    // this is a chain of THUNK_SELECTORs that we are going to update
    // to point to the value of the current THUNK_SELECTOR.  Each
    // closure on the chain is a BLACKHOLE, and points to the next in the
    // chain with payload[0].

selector_chain:

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED(p)) {
        // If the THUNK_SELECTOR is in to-space or in a generation that we
        // are not collecting, then bale out early.  We won't be able to
        // save any space in any case, and updating with an indirection is
        // trickier in a non-collected gen: we would have to update the
        // mutable list.
        if (bd->flags & BF_EVACUATED) {
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            *q = (StgClosure *)p;
            return;
        }
        // we don't update THUNK_SELECTORS in the compacted
        // generation, because compaction does not remove the INDs
        // that result, this causes confusion later
        // (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
        // bit is very tricky to get right.  If you make changes
        // around here, test by compiling stage 3 with +RTS -c -RTS.
        if (bd->flags & BF_MARKED) {
            // must call evacuate() to mark this closure if evac==rtsTrue
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }

    // BLACKHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
#if defined(THREADED_RTS)
    // In threaded mode, we'll use WHITEHOLE to lock the selector
    // thunk while we evaluate it.
    {
        do {
            info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
        } while (info_ptr == (W_)&stg_WHITEHOLE_info);

        // make sure someone else didn't get here first...
        // (the forwarding-pointer test must look at the info word we just
        // read, not at the closure pointer p itself: closure pointers are
        // word-aligned and never look like forwarding pointers)
        if (IS_FORWARDING_PTR(info_ptr) ||
            INFO_PTR_TO_STRUCT(info_ptr)->type != THUNK_SELECTOR) {
            // v. tricky now.  The THUNK_SELECTOR has been evacuated
            // by another thread, and is now either a forwarding ptr or IND.
            // We need to extract ourselves from the current situation
            // as cleanly as possible.
            //   - unlock the closure
            //   - update *q, we may have done *some* evaluation
            //   - if evac, we need to call evacuate(), because we
            //     need the write-barrier stuff.
            //   - undo the chain we've built to point to p.
            SET_INFO(p, (const StgInfoTable *)info_ptr);
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }
#else
    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = (StgWord)p->header.info;
    SET_INFO(p,&stg_BLACKHOLE_info);
#endif

    field = INFO_PTR_TO_STRUCT(info_ptr)->layout.selector_offset;

    // The selectee might be a constructor closure,
    // so we untag the pointer.
    selectee = UNTAG_CLOSURE(p->selectee);

selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: we try not to
    // end up in to-space, but it's impractical to avoid it in
    // general.  The compacting GC scatters to-space pointers in
    // from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.

    info = (StgInfoTable*)selectee->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // We don't follow pointers into to-space; the constructor
        // has already been evacuated, so we won't save any space
        // leaks by evaluating this selector thunk anyhow.
        goto bale_out;
    }

    info = INFO_PTR_TO_STRUCT(info);
    switch (info->type) {
    case WHITEHOLE:
        goto bale_out; // about to be evacuated by another thread (or a loop).

    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_2_0:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    {
        // check that the size is in range
        ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
                                   info->layout.payload.nptrs));

        // Select the right field from the constructor
        val = selectee->payload[field];

#ifdef PROFILING
        // For the purposes of LDV profiling, we have destroyed
        // the original selector thunk, p.
        SET_INFO(p, (StgInfoTable *)info_ptr);
        LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC((StgClosure *)p);
        SET_INFO(p, &stg_BLACKHOLE_info);
#endif

        // the closure in val is now the "value" of the
        // THUNK_SELECTOR in p.  However, val may itself be a
        // THUNK_SELECTOR, in which case we want to continue
        // evaluating until we find the real value, and then
        // update the whole chain to point to the value.
    val_loop:
        info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
        if (!IS_FORWARDING_PTR(info_ptr))
        {
            info = INFO_PTR_TO_STRUCT(info_ptr);
            switch (info->type) {
            case IND:
            case IND_PERM:
            case IND_OLDGEN:
            case IND_OLDGEN_PERM:
            case IND_STATIC:
                val = ((StgInd *)val)->indirectee;
                goto val_loop;
            case THUNK_SELECTOR:
                ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
                prev_thunk_selector = p;
                p = (StgSelector*)val;
                goto selector_chain;
            default:
                break;
            }
        }
        ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
        prev_thunk_selector = p;

        *q = val;
        if (evac) evacuate(q);
        val = *q;
        // evacuate() cannot recurse through
        // eval_thunk_selector(), because we know val is not
        // a THUNK_SELECTOR.
        unchain_thunk_selectors(prev_thunk_selector, val);
        return;
    }

    case IND:
    case IND_PERM:
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
        // Again, we might need to untag a constructor.
        selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
        goto selector_loop;

    case THUNK_SELECTOR:
    {
        StgClosure *val;

        // recursively evaluate this selector.  We don't want to
        // recurse indefinitely, so we impose a depth bound.
        if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
            goto bale_out;
        }

        gct->thunk_selector_depth++;
        // rtsFalse says "don't evacuate the result".  It will,
        // however, update any THUNK_SELECTORs that are evaluated
        // along the way.
        eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
        gct->thunk_selector_depth--;

        // did we actually manage to evaluate it?
        if (val == selectee) goto bale_out;

        // Of course this pointer might be tagged...
        selectee = UNTAG_CLOSURE(val);
        goto selector_loop;
    }

    case AP:
    case AP_STACK:
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_2_0:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_STATIC:
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        // not evaluated yet
        goto bale_out;

    default:
        barf("eval_thunk_selector: strange selectee %d",
             (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info
    // pointer.  But don't forget: we still need to evacuate the thunk itself.
    SET_INFO(p, (const StgInfoTable *)info_ptr);
    // THREADED_RTS: we just unlocked the thunk, so another thread
    // might get in and update it.  copy() will lock it again and
    // check whether it was updated in the meantime.
    *q = (StgClosure *)p;
    if (evac) {
        copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
    }
    unchain_thunk_selectors(prev_thunk_selector, *q);
    return;
}