1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2018
4 *
5 * Non-moving garbage collector and allocator: Mark phase
6 *
7 * ---------------------------------------------------------------------------*/
8
9 #include "Rts.h"
10 // We call evacuate, which expects the thread-local gc_thread to be valid;
11 // This is sometimes declared as a register variable; therefore it is necessary
12 // to include the declaration so that the compiler doesn't clobber the register.
13 #include "NonMovingMark.h"
14 #include "NonMovingShortcut.h"
15 #include "NonMoving.h"
16 #include "BlockAlloc.h" /* for countBlocks */
17 #include "HeapAlloc.h"
18 #include "Task.h"
19 #include "Trace.h"
20 #include "HeapUtils.h"
21 #include "Printer.h"
22 #include "Schedule.h"
23 #include "Weak.h"
24 #include "Stats.h"
25 #include "STM.h"
26 #include "MarkWeak.h"
27 #include "sm/Storage.h"
28 #include "CNF.h"
29
30 static void mark_closure (MarkQueue *queue, const StgClosure *p, StgClosure **origin);
31 static void mark_tso (MarkQueue *queue, StgTSO *tso);
32 static void mark_stack (MarkQueue *queue, StgStack *stack);
33 static void mark_PAP_payload (MarkQueue *queue,
34 StgClosure *fun,
35 StgClosure **payload,
36 StgWord size);
37
38 // How many Array# entries to add to the mark queue at once?
39 #define MARK_ARRAY_CHUNK_LENGTH 128
40
41 /* Note [Large objects in the non-moving collector]
42 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
43 * The nonmoving collector keeps a separate list of its large objects, apart from
44 * oldest_gen->large_objects. There are two reasons for this:
45 *
46 * 1. oldest_gen is mutated by minor collections, which happen concurrently with
47 * marking
48 * 2. the non-moving collector needs a consistent picture
49 *
50 * At the beginning of a major collection, nonmovingCollect takes the objects in
51 * oldest_gen->large_objects (which includes all large objects evacuated by the
52 * moving collector) and adds them to nonmoving_large_objects. This is the set
53 * of large objects that will be collected in the current major GC cycle.
54 *
55 * As the concurrent mark phase proceeds, the large objects in
56 * nonmoving_large_objects that are found to be live are moved to
57 * nonmoving_marked_large_objects. During sweep we discard all objects that remain
58 * in nonmoving_large_objects and move everything in nonmoving_marked_large_objects
59 * back to nonmoving_large_objects.
60 *
61 * During minor collections large objects will accumulate on
62 * oldest_gen->large_objects, where they will be picked up by the nonmoving
63 * collector and moved to nonmoving_large_objects during the next major GC.
64 * When this happens the block gets its BF_NONMOVING_SWEEPING flag set to
65 * indicate that it is part of the snapshot and consequently should be marked by
66 * the nonmoving mark phase.
67 */
68
69 bdescr *nonmoving_large_objects = NULL;
70 bdescr *nonmoving_marked_large_objects = NULL;
71 memcount n_nonmoving_large_blocks = 0;
72 memcount n_nonmoving_marked_large_blocks = 0;
73
74 bdescr *nonmoving_compact_objects = NULL;
75 bdescr *nonmoving_marked_compact_objects = NULL;
76 memcount n_nonmoving_compact_blocks = 0;
77 memcount n_nonmoving_marked_compact_blocks = 0;
78
79 #if defined(THREADED_RTS)
80 /* Protects everything above. Furthermore, we only set the BF_MARKED bit of
81 * large object blocks when this is held. This ensures that the write barrier
82 * (e.g. finish_upd_rem_set_mark) and the collector (mark_closure) don't try to
83 * move the same large object to nonmoving_marked_large_objects more than once.
84 */
85 static Mutex nonmoving_large_objects_mutex;
86 // Note that we don't need a similar lock for compact objects because we never
87 // mark a compact object eagerly in a write barrier; all compact objects are
88 // marked by the mark thread, so there can't be any races here.
89 #endif
90
91 /*
92 * Where we keep our threads during collection: we must retain a snapshot of
93 * the threads that lived in the nonmoving heap at the time the snapshot was
94 * taken in order to resurrect them safely.
95 */
96 StgTSO *nonmoving_old_threads = END_TSO_QUEUE;
97 /* Same for weak pointers */
98 StgWeak *nonmoving_old_weak_ptr_list = NULL;
99 /* Because we can "tidy" thread and weak lists concurrently with a minor GC we
100 * need to move marked threads and weaks to these lists until we pause for sync.
101 * Then we move them to oldest_gen lists. */
102 StgTSO *nonmoving_threads = END_TSO_QUEUE;
103 StgWeak *nonmoving_weak_ptr_list = NULL;
104
105 #if defined(DEBUG)
106 // TODO (osa): Document
107 StgIndStatic *debug_caf_list_snapshot = (StgIndStatic*)END_OF_CAF_LIST;
108 #endif
109
110 /* Note [Update remembered set]
111 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
112 * The concurrent non-moving collector uses a remembered set to ensure
113 * that its marking is consistent with the snapshot invariant defined in
114 * the design. This remembered set, known as the update remembered set,
115 * records all pointers that have been overwritten since the beginning
116 * of the concurrent mark. This ensures that concurrent mutation cannot hide
117 * pointers to live objects from the nonmoving garbage collector.
118 *
119 * The update remembered set is maintained via a write barrier that
120 * is enabled whenever a concurrent mark is active. This write barrier
121 * can be found in a number of places:
122 *
123 * - In rts/Primops.cmm in primops responsible for modifying mutable closures
124 * (e.g. MVARs, MUT_VARs, etc.)
125 *
126 *  - In rts/STM.c, where the STM machinery mutates TVars and transaction state
127 *
128 * - In the dirty_* functions found in rts/Storage.c where we dirty MVARs,
129 * MUT_VARs, TSOs and STACKs. STACK is a somewhat special case, as described
130 * in Note [StgStack dirtiness flags and concurrent marking] in TSO.h.
131 *
132 * - In the code generated by the STG code generator for pointer array writes
133 *
134 * - In thunk updates (e.g. updateWithIndirection) to ensure that the free
135 * variables of the original thunk remain reachable.
136 *
137 * There is also a read barrier to handle weak references, as described in
138 * Note [Concurrent read barrier on deRefWeak#].
139 *
140 * The representation of the update remembered set is the same as that of
141 * the mark queue. For efficiency, each capability maintains its own local
142 * accumulator of remembered set entries. When a capability fills its
143 * accumulator it is linked in to the global remembered set
144 * (upd_rem_set_block_list), where it is consumed by the mark phase.
145 *
146 * The mark phase is responsible for freeing update remembered set block
147 * allocations.
148 *
149 *
150 * Note [Concurrent read barrier on deRefWeak#]
151 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
152 *
153 * In general the non-moving GC assumes that all pointers reachable from a
154 * marked object are themselves marked (or in the mark queue). However,
155 * weak pointers are an obvious exception to this rule. In particular,
156 * deRefWeakPtr# allows the mutator to turn a weak reference into a strong
157 * reference. This interacts badly with concurrent collection. For
158 * instance, consider this program:
159 *
160 * f :: a -> b -> IO b
161 * f k v = do
162 * -- assume that k and v are the only references to the
163 * -- closures to which they refer.
164 * weak <- mkWeakPtr k v Nothing
165 *
166 * -- N.B. k is now technically dead since the only reference to it is
167 * -- weak, but we've not yet had a chance to tombstone the WeakPtr
168 * -- (which will happen in the course of major GC).
169 * performMajorGC
170 * -- Now we are running concurrently with the mark...
171 *
172 * Just x <- deRefWeak weak
173 * -- We have now introduced a reference to `v`, which will not be
174 * -- marked, as the only reference to `v` at the time the snapshot
175 * -- was taken was via a WeakPtr.
176 * return x
177 *
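 * To address this, deRefWeak# applies a read barrier: while
 * nonmoving_write_barrier_enabled is set, the value being dereferenced is
 * pushed to the update remembered set, ensuring that `v` is marked even
 * though the snapshot only reached it via the WeakPtr.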
178 */
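
/* For illustration only (this is a sketch, not a definition used by the RTS):
 * a mutator-side use of the write barrier, in the spirit of the dirty_*
 * functions in rts/Storage.c, might look roughly as follows. The function
 * name and its arguments are hypothetical; the real call sites are those
 * listed in Note [Update remembered set] above.
 *
 *     static void example_overwrite (Capability *cap, StgMutVar *mv,
 *                                    StgClosure *new_value)
 *     {
 *         if (nonmoving_write_barrier_enabled) {
 *             // Record the pointer we are about to overwrite so that the
 *             // concurrent mark still sees the snapshot-time object graph.
 *             updateRemembSetPushClosure(cap, mv->var);
 *         }
 *         mv->var = new_value;
 *     }
 */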
179 static Mutex upd_rem_set_lock;
180 bdescr *upd_rem_set_block_list = NULL;
181
182 #if defined(THREADED_RTS)
183 /* Used during the mark/sweep phase transition to track how many capabilities
184 * have pushed their update remembered sets. Protected by upd_rem_set_lock.
185 */
186 static volatile StgWord upd_rem_set_flush_count = 0;
187 #endif
188
189
190 /* Signaled by each capability when it has flushed its update remembered set */
191 static Condition upd_rem_set_flushed_cond;
192
193 /* Indicates to mutators that the write barrier must be respected. Set while
194 * concurrent mark is running.
195 */
196 StgWord nonmoving_write_barrier_enabled = false;
197
198 /* Used to provide the current mark queue to the young generation
199 * collector for scavenging.
200 */
201 MarkQueue *current_mark_queue = NULL;
202
203 /* Initialise update remembered set data structures */
204 void nonmovingMarkInitUpdRemSet() {
205 initMutex(&upd_rem_set_lock);
206 initCondition(&upd_rem_set_flushed_cond);
207 #if defined(THREADED_RTS)
208 initMutex(&nonmoving_large_objects_mutex);
209 #endif
210 }
211
212 #if defined(THREADED_RTS) && defined(DEBUG)
213 static uint32_t markQueueLength(MarkQueue *q);
214 #endif
215 static void init_mark_queue_(MarkQueue *queue);
216
217 /* Transfers the given capability's update-remembered set to the global
218 * remembered set.
219 *
220 * Really the argument type should be UpdRemSet* but this would be rather
221 * inconvenient without polymorphism.
222 */
223 void nonmovingAddUpdRemSetBlocks(MarkQueue *rset)
224 {
225 if (markQueueIsEmpty(rset)) return;
226
227 // find the tail of the queue
228 bdescr *start = rset->blocks;
229 bdescr *end = start;
230 while (end->link != NULL)
231 end = end->link;
232
233 // add the blocks to the global remembered set
234 ACQUIRE_LOCK(&upd_rem_set_lock);
235 end->link = upd_rem_set_block_list;
236 upd_rem_set_block_list = start;
237 RELEASE_LOCK(&upd_rem_set_lock);
238
239 // Reset remembered set
240 ACQUIRE_SM_LOCK;
241 init_mark_queue_(rset);
242 rset->is_upd_rem_set = true;
243 RELEASE_SM_LOCK;
244 }
245
246 #if defined(THREADED_RTS)
247 /* Called by capabilities to flush their update remembered sets when
248 * synchronising with the non-moving collector as it transitions from mark to
249 * sweep phase.
250 */
251 void nonmovingFlushCapUpdRemSetBlocks(Capability *cap)
252 {
253 debugTrace(DEBUG_nonmoving_gc,
254 "Capability %d flushing update remembered set: %d",
255 cap->no, markQueueLength(&cap->upd_rem_set.queue));
256 traceConcUpdRemSetFlush(cap);
257 nonmovingAddUpdRemSetBlocks(&cap->upd_rem_set.queue);
258 atomic_inc(&upd_rem_set_flush_count, 1);
259 signalCondition(&upd_rem_set_flushed_cond);
260 // After this, mutation will remain suspended until nonmovingFinishFlush
261 // releases its capabilities.
262 }
263
264 /* Request that all capabilities flush their update remembered sets and suspend
265 * execution until further notice.
266 */
267 void nonmovingBeginFlush(Task *task)
268 {
269 debugTrace(DEBUG_nonmoving_gc, "Starting update remembered set flush...");
270 traceConcSyncBegin();
271 upd_rem_set_flush_count = 0;
272 stat_startNonmovingGcSync();
273 stopAllCapabilitiesWith(NULL, task, SYNC_FLUSH_UPD_REM_SET);
274
275 // XXX: We may have been given a capability via releaseCapability (i.e. a
276 // task suspended due to a foreign call) in which case our requestSync
277 // logic won't have been hit. Make sure that everyone so far has flushed.
278 // Ideally we want to mark asynchronously with syncing.
279 for (uint32_t i = 0; i < n_capabilities; i++) {
280 nonmovingFlushCapUpdRemSetBlocks(capabilities[i]);
281 }
282 }
283
284 /* Wait until a capability has flushed its update remembered set. Returns true
285 * if all capabilities have flushed.
286 */
287 bool nonmovingWaitForFlush()
288 {
289 ACQUIRE_LOCK(&upd_rem_set_lock);
290 debugTrace(DEBUG_nonmoving_gc, "Flush count %d", upd_rem_set_flush_count);
291 bool finished = upd_rem_set_flush_count == n_capabilities;
292 if (!finished) {
293 waitCondition(&upd_rem_set_flushed_cond, &upd_rem_set_lock);
294 }
295 RELEASE_LOCK(&upd_rem_set_lock);
296 return finished;
297 }
298
299 /* Note [Unintentional marking in resurrectThreads]
300 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
301 * In both moving and non-moving collectors threads found to be unreachable are
302 * evacuated/marked and then resurrected with resurrectThreads. resurrectThreads
303 * raises an exception in the unreachable thread via raiseAsync, which does
304 * mutations on the heap. These mutations add entries to the UpdRemSet of the
305 * thread's capability. Here's an example backtrace where this happens:
306 *
307 * #0 updateRemembSetPushClosure
308 * #1 0x000000000072b363 in dirty_TVAR
309 * #2 0x00000000007162e5 in remove_watch_queue_entries_for_trec
310 * #3 0x0000000000717098 in stmAbortTransaction
311 * #4 0x000000000070c6eb in raiseAsync
312 * #5 0x000000000070b473 in throwToSingleThreaded__
313 * #6 0x000000000070b4ab in throwToSingleThreaded
314 * #7 0x00000000006fce82 in resurrectThreads
315 * #8 0x00000000007215db in nonmovingMark_
316 * #9 0x0000000000721438 in nonmovingConcurrentMark
317 * #10 0x00007f1ee81cd6db in start_thread
318 * #11 0x00007f1ee850688f in clone
319 *
320 * However we don't really want to run write barriers when calling
321 * resurrectThreads here, because we're in a GC pause, and overwritten values
322 * are definitely gone forever (as opposed to being inserted in a marked object
323 * or kept in registers and used later).
324 *
325 * If we don't reset the UpdRemSets when this happens, then in the next mark
326 * we will see the objects that the previous mark's resurrectThreads added to
327 * the UpdRemSets and mark them. This keeps unreachable objects alive and
328 * breaks weak finalization and thread resurrection (both of which rely on
329 * objects becoming unreachable). As an example, stm048 fails when
330 * we get this wrong, because when we do raiseAsync on a thread that was blocked
331 * on an STM transaction we mutate a TVAR_WATCH_QUEUE, which has a reference to
332 * the TSO that was running the STM transaction. If the TSO becomes unreachable
333 * again in the next GC we don't realize this, because it was added to an
334 * UpdRemSet in the previous GC's mark phase, because of raiseAsync.
335 *
336 * To fix this we clear all UpdRemSets in nonmovingFinishFlush, right before
337 * releasing capabilities. This is somewhat inefficient (we allow adding objects
338 * to UpdRemSets, only to later reset them), but the only case where we add to
339 * UpdRemSets during mark is resurrectThreads, and I don't think we resurrect
340 * so many threads that we fill the UpdRemSets and allocate new blocks. So
341 * pushing an UpdRemSet in this case is really fast, and resetting is even
342 * faster (we just update a pointer).
343 *
344 * TODO (osa): What if we actually marked UpdRemSets in this case, in the mark
345 * loop? Would that work? Or what would break?
346 */
347
348 /* Notify capabilities that the synchronisation is finished; they may resume
349 * execution.
350 */
351 void nonmovingFinishFlush(Task *task)
352 {
353 // See Note [Unintentional marking in resurrectThreads]
354 for (uint32_t i = 0; i < n_capabilities; i++) {
355 reset_upd_rem_set(&capabilities[i]->upd_rem_set);
356 }
357 // Also reset upd_rem_set_block_list in case some of the UpdRemSets were
358 // filled and we flushed them.
359 freeChain_lock(upd_rem_set_block_list);
360 upd_rem_set_block_list = NULL;
361
362 debugTrace(DEBUG_nonmoving_gc, "Finished update remembered set flush...");
363 traceConcSyncEnd();
364 stat_endNonmovingGcSync();
365 releaseAllCapabilities(n_capabilities, NULL, task);
366 }
367 #endif
368
369 /*********************************************************
370 * Pushing to either the mark queue or remembered set
371 *********************************************************/
372
373 STATIC_INLINE void
374 push (MarkQueue *q, const MarkQueueEnt *ent)
375 {
376 // Are we at the end of the block?
377 if (q->top->head == MARK_QUEUE_BLOCK_ENTRIES) {
378 // Yes, this block is full.
379 if (q->is_upd_rem_set) {
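// Flush the full block to the global update remembered set. This also
// leaves the queue with a fresh, empty block for the entry we are about
// to push.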
380 nonmovingAddUpdRemSetBlocks(q);
381 } else {
382 // allocate a fresh block.
383 ACQUIRE_SM_LOCK;
384 bdescr *bd = allocGroup(MARK_QUEUE_BLOCKS);
385 bd->link = q->blocks;
386 q->blocks = bd;
387 q->top = (MarkQueueBlock *) bd->start;
388 q->top->head = 0;
389 RELEASE_SM_LOCK;
390 }
391 }
392
393 q->top->entries[q->top->head] = *ent;
394 q->top->head++;
395 }
396
397 /* A variant of push to be used by the minor GC when it encounters a reference
398 * to an object in the non-moving heap. In contrast to the other push
399 * operations this uses the gc_alloc_block_sync spinlock instead of the
400 * SM_LOCK to allocate new blocks in the event that the mark queue is full.
401 */
402 void
403 markQueuePushClosureGC (MarkQueue *q, StgClosure *p)
404 {
405 /* We should not make it here if we are doing a deadlock detect GC.
406 * See Note [Deadlock detection under nonmoving collector].
407 */
408 ASSERT(!deadlock_detect_gc);
409
410 // Are we at the end of the block?
411 if (q->top->head == MARK_QUEUE_BLOCK_ENTRIES) {
412 // Yes, this block is full.
413 // allocate a fresh block.
414 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
415 bdescr *bd = allocGroup(MARK_QUEUE_BLOCKS);
416 bd->link = q->blocks;
417 q->blocks = bd;
418 q->top = (MarkQueueBlock *) bd->start;
419 q->top->head = 0;
420 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
421 }
422
423 MarkQueueEnt ent = {
424 .mark_closure = {
425 .p = UNTAG_CLOSURE(p),
426 .origin = NULL,
427 }
428 };
429 q->top->entries[q->top->head] = ent;
430 q->top->head++;
431 }
432
433 static inline
434 void push_closure (MarkQueue *q,
435 StgClosure *p,
436 StgClosure **origin)
437 {
438 #if defined(DEBUG)
439 ASSERT(!HEAP_ALLOCED_GC(p) || (Bdescr((StgPtr) p)->gen == oldest_gen));
440 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
441 // Commenting out: too slow
442 // if (RtsFlags.DebugFlags.sanity) {
443 // assert_in_nonmoving_heap((P_)p);
444 // if (origin)
445 // assert_in_nonmoving_heap((P_)origin);
446 // }
447 #endif
448
449 // This must be true as origin points to a pointer and therefore must be
450 // word-aligned. However, we check this as otherwise we would confuse this
451 // with a mark_array entry
452 ASSERT(((uintptr_t) origin & 0x3) == 0);
453
454 MarkQueueEnt ent = {
455 .mark_closure = {
456 .p = p,
457 .origin = origin,
458 }
459 };
460 push(q, &ent);
461 }
462
463 static
464 void push_array (MarkQueue *q,
465 const StgMutArrPtrs *array,
466 StgWord start_index)
467 {
468 // TODO: Push this into callers where they already have the Bdescr
469 if (HEAP_ALLOCED_GC(array) && (Bdescr((StgPtr) array)->gen != oldest_gen))
470 return;
471
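// N.B. the start index is tagged with 0x3 in its low bits so that a
// mark_array entry can be distinguished from a mark_closure entry, whose
// origin pointer is always word-aligned (see the assertion in push_closure).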
472 MarkQueueEnt ent = {
473 .mark_array = {
474 .array = array,
475 .start_index = (start_index << 16) | 0x3,
476 }
477 };
478 push(q, &ent);
479 }
480
481 static
482 void push_thunk_srt (MarkQueue *q, const StgInfoTable *info)
483 {
484 const StgThunkInfoTable *thunk_info = itbl_to_thunk_itbl(info);
485 if (thunk_info->i.srt) {
486 push_closure(q, (StgClosure*)GET_SRT(thunk_info), NULL);
487 }
488 }
489
490 static
491 void push_fun_srt (MarkQueue *q, const StgInfoTable *info)
492 {
493 const StgFunInfoTable *fun_info = itbl_to_fun_itbl(info);
494 if (fun_info->i.srt) {
495 push_closure(q, (StgClosure*)GET_FUN_SRT(fun_info), NULL);
496 }
497 }
498
499 /*********************************************************
500 * Pushing to the update remembered set
501 *
502 * upd_rem_set_push_* functions are directly called by
503 * mutators and need to check whether the value is in
504 * non-moving heap.
505 *********************************************************/
506
507 // Check if the object is traced by the non-moving collector. This holds in
508 // the following cases:
509 //
510 // - Object is in the non-moving heap
511 // - Object is a large object (BF_LARGE) marked as BF_NONMOVING
512 // - Object is static (HEAP_ALLOCED_GC(obj) == false)
513 //
514 static
515 bool check_in_nonmoving_heap(StgClosure *p) {
516 if (HEAP_ALLOCED_GC(p)) {
517 // This works for both large and small objects:
518 return Bdescr((P_)p)->flags & BF_NONMOVING;
519 } else {
520 return true; // a static object
521 }
522 }
523
524 /* Push the free variables of a (now-evaluated) thunk to the
525 * update remembered set.
526 */
527 inline void updateRemembSetPushThunk(Capability *cap, StgThunk *thunk)
528 {
529 const StgInfoTable *info;
530 do {
531 info = get_volatile_itbl((StgClosure *) thunk);
532 } while (info->type == WHITEHOLE);
533 updateRemembSetPushThunkEager(cap, (StgThunkInfoTable *) info, thunk);
534 }
535
536 /* Push the free variables of a thunk to the update remembered set.
537 * This is called by the thunk update code (e.g. updateWithIndirection) before
538 * we update the indirectee to ensure that the thunk's free variables remain
539 * visible to the concurrent collector.
540 *
541 * See Note [Update remembered set].
542 */
543 void updateRemembSetPushThunkEager(Capability *cap,
544 const StgThunkInfoTable *info,
545 StgThunk *thunk)
546 {
547 /* N.B. info->i.type mustn't be WHITEHOLE */
548 switch (info->i.type) {
549 case THUNK:
550 case THUNK_1_0:
551 case THUNK_0_1:
552 case THUNK_2_0:
553 case THUNK_1_1:
554 case THUNK_0_2:
555 {
556 MarkQueue *queue = &cap->upd_rem_set.queue;
557 push_thunk_srt(queue, &info->i);
558
559 for (StgWord i = 0; i < info->i.layout.payload.ptrs; i++) {
560 if (check_in_nonmoving_heap(thunk->payload[i])) {
561 // Don't bother to push origin; it makes the barrier needlessly
562 // expensive with little benefit.
563 push_closure(queue, thunk->payload[i], NULL);
564 }
565 }
566 break;
567 }
568 case AP:
569 {
570 MarkQueue *queue = &cap->upd_rem_set.queue;
571 StgAP *ap = (StgAP *) thunk;
572 if (check_in_nonmoving_heap(ap->fun)) {
573 push_closure(queue, ap->fun, NULL);
574 }
575 mark_PAP_payload(queue, ap->fun, ap->payload, ap->n_args);
576 break;
577 }
578 case THUNK_SELECTOR:
579 case BLACKHOLE:
580 // TODO: This is right, right?
581 break;
582 default:
583 barf("updateRemembSetPushThunk: invalid thunk pushed: p=%p, type=%d",
584 thunk, info->i.type);
585 }
586 }
587
588 void updateRemembSetPushThunk_(StgRegTable *reg, StgThunk *p)
589 {
590 updateRemembSetPushThunk(regTableToCapability(reg), p);
591 }
592
593 inline void updateRemembSetPushClosure(Capability *cap, StgClosure *p)
594 {
595 if (check_in_nonmoving_heap(p)) {
596 MarkQueue *queue = &cap->upd_rem_set.queue;
597 push_closure(queue, p, NULL);
598 }
599 }
600
601 void updateRemembSetPushClosure_(StgRegTable *reg, StgClosure *p)
602 {
603 updateRemembSetPushClosure(regTableToCapability(reg), p);
604 }
605
606 STATIC_INLINE bool needs_upd_rem_set_mark(StgClosure *p)
607 {
608 // TODO: Deduplicate with mark_closure
609 bdescr *bd = Bdescr((StgPtr) p);
610 if (bd->gen != oldest_gen) {
611 return false;
612 } else if (bd->flags & BF_LARGE) {
613 if (! (bd->flags & BF_NONMOVING_SWEEPING)) {
614 return false;
615 } else {
616 return ! (bd->flags & BF_MARKED);
617 }
618 } else {
619 struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);
620 nonmoving_block_idx block_idx = nonmovingGetBlockIdx((StgPtr) p);
621 return nonmovingGetMark(seg, block_idx) != nonmovingMarkEpoch;
622 }
623 }
624
625 /* Set the mark bit; only to be called *after* we have fully marked the closure */
626 STATIC_INLINE void finish_upd_rem_set_mark(StgClosure *p)
627 {
628 bdescr *bd = Bdescr((StgPtr) p);
629 if (bd->flags & BF_LARGE) {
630 // Someone else may have already marked it.
631 ACQUIRE_LOCK(&nonmoving_large_objects_mutex);
632 if (! (bd->flags & BF_MARKED)) {
633 bd->flags |= BF_MARKED;
634 dbl_link_remove(bd, &nonmoving_large_objects);
635 dbl_link_onto(bd, &nonmoving_marked_large_objects);
636 n_nonmoving_large_blocks -= bd->blocks;
637 n_nonmoving_marked_large_blocks += bd->blocks;
638 }
639 RELEASE_LOCK(&nonmoving_large_objects_mutex);
640 } else {
641 struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);
642 nonmoving_block_idx block_idx = nonmovingGetBlockIdx((StgPtr) p);
643 nonmovingSetMark(seg, block_idx);
644 }
645 }
646
647 void updateRemembSetPushTSO(Capability *cap, StgTSO *tso)
648 {
649 if (needs_upd_rem_set_mark((StgClosure *) tso)) {
650 debugTrace(DEBUG_nonmoving_gc, "upd_rem_set: TSO %p", tso);
651 mark_tso(&cap->upd_rem_set.queue, tso);
652 finish_upd_rem_set_mark((StgClosure *) tso);
653 }
654 }
655
656 void updateRemembSetPushStack(Capability *cap, StgStack *stack)
657 {
658 // N.B. caller responsible for checking nonmoving_write_barrier_enabled
659 if (needs_upd_rem_set_mark((StgClosure *) stack)) {
660 StgWord marking = stack->marking;
661 // See Note [StgStack dirtiness flags and concurrent marking]
662 if (cas(&stack->marking, marking, nonmovingMarkEpoch)
663 != nonmovingMarkEpoch) {
664 // We have claimed the right to mark the stack.
665 debugTrace(DEBUG_nonmoving_gc, "upd_rem_set: STACK %p", stack->sp);
666 mark_stack(&cap->upd_rem_set.queue, stack);
667 finish_upd_rem_set_mark((StgClosure *) stack);
668 return;
669 } else {
670 // The concurrent GC has claimed the right to mark the stack.
671 // Wait until it finishes marking before proceeding with
672 // mutation.
673 while (needs_upd_rem_set_mark((StgClosure *) stack));
674 #if defined(PARALLEL_GC)
675 busy_wait_nop(); // TODO: Spinning here is unfortunate
676 #endif
677 return;
678 }
679 }
680 }
681
682 /*********************************************************
683 * Pushing to the mark queue
684 *********************************************************/
685
686 void markQueuePush (MarkQueue *q, const MarkQueueEnt *ent)
687 {
688 push(q, ent);
689 }
690
691 void markQueuePushClosure (MarkQueue *q,
692 StgClosure *p,
693 StgClosure **origin)
694 {
695 // TODO: Push this into callers where they already have the Bdescr
696 if (check_in_nonmoving_heap(p)) {
697 push_closure(q, p, origin);
698 }
699 }
700
701 /* TODO: Do we really never want to specify the origin here? */
702 void markQueueAddRoot (MarkQueue* q, StgClosure** root)
703 {
704 markQueuePushClosure(q, *root, NULL);
705 }
706
707 /* Push a closure to the mark queue without origin information */
708 void markQueuePushClosure_ (MarkQueue *q, StgClosure *p)
709 {
710 markQueuePushClosure(q, p, NULL);
711 }
712
713 void markQueuePushFunSrt (MarkQueue *q, const StgInfoTable *info)
714 {
715 push_fun_srt(q, info);
716 }
717
718 void markQueuePushThunkSrt (MarkQueue *q, const StgInfoTable *info)
719 {
720 push_thunk_srt(q, info);
721 }
722
723 void markQueuePushArray (MarkQueue *q,
724 const StgMutArrPtrs *array,
725 StgWord start_index)
726 {
727 push_array(q, array, start_index);
728 }
729
730 /*********************************************************
731 * Popping from the mark queue
732 *********************************************************/
733
734 // Returns invalid MarkQueueEnt if queue is empty.
735 static MarkQueueEnt markQueuePop_ (MarkQueue *q)
736 {
737 MarkQueueBlock *top;
738
739 again:
740 top = q->top;
741
742 // Are we at the beginning of the block?
743 if (top->head == 0) {
744 // Is this the first block of the queue?
745 if (q->blocks->link == NULL) {
746 // Yes, therefore queue is empty...
747 MarkQueueEnt none = { .null_entry = { .p = NULL } };
748 return none;
749 } else {
750 // No, unwind to the previous block and try popping again...
751 bdescr *old_block = q->blocks;
752 q->blocks = old_block->link;
753 q->top = (MarkQueueBlock*)q->blocks->start;
754 ACQUIRE_SM_LOCK;
755 freeGroup(old_block); // TODO: hold on to a block to avoid repeated allocation/deallocation?
756 RELEASE_SM_LOCK;
757 goto again;
758 }
759 }
760
761 top->head--;
762 MarkQueueEnt ent = top->entries[top->head];
763 return ent;
764 }
765
766 static MarkQueueEnt markQueuePop (MarkQueue *q)
767 {
768 #if MARK_PREFETCH_QUEUE_DEPTH == 0
769 return markQueuePop_(q);
770 #else
771 unsigned int i = q->prefetch_head;
772 while (nonmovingMarkQueueEntryType(&q->prefetch_queue[i]) == NULL_ENTRY) {
773 MarkQueueEnt new = markQueuePop_(q);
774 if (nonmovingMarkQueueEntryType(&new) == NULL_ENTRY) {
775 // Mark queue is empty; look for any valid entries in the prefetch
776 // queue
777 for (unsigned int j = (i+1) % MARK_PREFETCH_QUEUE_DEPTH;
778 j != i;
779 j = (j+1) % MARK_PREFETCH_QUEUE_DEPTH)
780 {
781 if (nonmovingMarkQueueEntryType(&q->prefetch_queue[j]) != NULL_ENTRY) {
782 i = j;
783 goto done;
784 }
785 }
786 return new;
787 }
788
789 // The entry may not be a MARK_CLOSURE but it doesn't matter, our
790 // MarkQueueEnt encoding always places the pointer to the object to be
791 // marked first.
792 prefetchForRead(&new.mark_closure.p->header.info);
793 prefetchForRead(Bdescr((StgPtr) new.mark_closure.p));
794 q->prefetch_queue[i] = new;
795 i = (i + 1) % MARK_PREFETCH_QUEUE_DEPTH;
796 }
797
798 done:
799 ;
800 MarkQueueEnt ret = q->prefetch_queue[i];
801 q->prefetch_queue[i].null_entry.p = NULL;
802 q->prefetch_head = i;
803 return ret;
804 #endif
805 }
806
807 /*********************************************************
808 * Creating and destroying MarkQueues and UpdRemSets
809 *********************************************************/
810
811 /* Must hold sm_mutex. */
812 static void init_mark_queue_ (MarkQueue *queue)
813 {
814 bdescr *bd = allocGroup(MARK_QUEUE_BLOCKS);
815 queue->blocks = bd;
816 queue->top = (MarkQueueBlock *) bd->start;
817 queue->top->head = 0;
818 #if MARK_PREFETCH_QUEUE_DEPTH > 0
819 memset(&queue->prefetch_queue, 0, sizeof(queue->prefetch_queue));
820 queue->prefetch_head = 0;
821 #endif
822 }
823
824 /* Must hold sm_mutex. */
825 void initMarkQueue (MarkQueue *queue)
826 {
827 init_mark_queue_(queue);
828 queue->is_upd_rem_set = false;
829 }
830
831 /* Must hold sm_mutex. */
832 void init_upd_rem_set (UpdRemSet *rset)
833 {
834 init_mark_queue_(&rset->queue);
835 rset->queue.is_upd_rem_set = true;
836 }
837
838 void reset_upd_rem_set (UpdRemSet *rset)
839 {
840 // UpdRemSets always have one block for the mark queue. This assertion is
841 // here so that we update this code if we ever change that.
842 ASSERT(rset->queue.blocks->link == NULL);
843 rset->queue.top->head = 0;
844 }
845
846 void freeMarkQueue (MarkQueue *queue)
847 {
848 freeChain_lock(queue->blocks);
849 }
850
851 #if defined(THREADED_RTS) && defined(DEBUG)
852 static uint32_t
853 markQueueLength (MarkQueue *q)
854 {
855 uint32_t n = 0;
856 for (bdescr *block = q->blocks; block; block = block->link) {
857 MarkQueueBlock *queue = (MarkQueueBlock*)block->start;
858 n += queue->head;
859 }
860 return n;
861 }
862 #endif
863
864
865 /*********************************************************
866 * Marking
867 *********************************************************/
868
869 /*
870 * N.B. Mutation of TRecHeaders is completely unprotected by any write
871 * barrier. Consequently it's quite important that we deeply mark
872 * any outstanding transactions.
873 */
874 static void
875 mark_trec_header (MarkQueue *queue, StgTRecHeader *trec)
876 {
877 while (trec != NO_TREC) {
878 StgTRecChunk *chunk = trec->current_chunk;
879 markQueuePushClosure_(queue, (StgClosure *) trec);
880 markQueuePushClosure_(queue, (StgClosure *) chunk);
881 while (chunk != END_STM_CHUNK_LIST) {
882 for (StgWord i=0; i < chunk->next_entry_idx; i++) {
883 TRecEntry *ent = &chunk->entries[i];
884 markQueuePushClosure_(queue, (StgClosure *) ent->tvar);
885 markQueuePushClosure_(queue, ent->expected_value);
886 markQueuePushClosure_(queue, ent->new_value);
887 }
888 chunk = chunk->prev_chunk;
889 }
890 trec = trec->enclosing_trec;
891 }
892 }
893
894 static void
895 mark_tso (MarkQueue *queue, StgTSO *tso)
896 {
897 // TODO: Clear dirty if contains only old gen objects
898
899 if (tso->bound != NULL) {
900 markQueuePushClosure_(queue, (StgClosure *) tso->bound->tso);
901 }
902
903 markQueuePushClosure_(queue, (StgClosure *) tso->blocked_exceptions);
904 markQueuePushClosure_(queue, (StgClosure *) tso->bq);
905 mark_trec_header(queue, tso->trec);
906 markQueuePushClosure_(queue, (StgClosure *) tso->stackobj);
907 markQueuePushClosure_(queue, (StgClosure *) tso->_link);
908 if ( tso->why_blocked == BlockedOnMVar
909 || tso->why_blocked == BlockedOnMVarRead
910 || tso->why_blocked == BlockedOnBlackHole
911 || tso->why_blocked == BlockedOnMsgThrowTo
912 || tso->why_blocked == NotBlocked
913 ) {
914 markQueuePushClosure_(queue, tso->block_info.closure);
915 }
916 }
917
918 static void
919 do_push_closure (StgClosure **p, void *user)
920 {
921 MarkQueue *queue = (MarkQueue *) user;
922 // TODO: Origin? need reference to containing closure
923 markQueuePushClosure_(queue, *p);
924 }
925
926 static void
927 mark_large_bitmap (MarkQueue *queue,
928 StgClosure **p,
929 StgLargeBitmap *large_bitmap,
930 StgWord size)
931 {
932 walk_large_bitmap(do_push_closure, p, large_bitmap, size, queue);
933 }
934
935 static void
936 mark_small_bitmap (MarkQueue *queue, StgClosure **p, StgWord size, StgWord bitmap)
937 {
938 while (size > 0) {
939 if ((bitmap & 1) == 0) {
940 // TODO: Origin?
941 markQueuePushClosure(queue, *p, NULL);
942 }
943 p++;
944 bitmap = bitmap >> 1;
945 size--;
946 }
947 }
948
949 static GNUC_ATTR_HOT
950 void mark_PAP_payload (MarkQueue *queue,
951 StgClosure *fun,
952 StgClosure **payload,
953 StgWord size)
954 {
955 const StgFunInfoTable *fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(fun));
956 ASSERT(fun_info->i.type != PAP);
957 StgPtr p = (StgPtr) payload;
958
959 StgWord bitmap;
960 switch (fun_info->f.fun_type) {
961 case ARG_GEN:
962 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
963 goto small_bitmap;
964 case ARG_GEN_BIG:
965 mark_large_bitmap(queue, payload, GET_FUN_LARGE_BITMAP(fun_info), size);
966 break;
967 case ARG_BCO:
968 mark_large_bitmap(queue, payload, BCO_BITMAP(fun), size);
969 break;
970 default:
971 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
972 small_bitmap:
973 mark_small_bitmap(queue, (StgClosure **) p, size, bitmap);
974 break;
975 }
976 }
977
978 /* Helper for mark_stack; returns next stack frame. */
979 static StgPtr
980 mark_arg_block (MarkQueue *queue, const StgFunInfoTable *fun_info, StgClosure **args)
981 {
982 StgWord bitmap, size;
983
984 StgPtr p = (StgPtr)args;
985 switch (fun_info->f.fun_type) {
986 case ARG_GEN:
987 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
988 size = BITMAP_SIZE(fun_info->f.b.bitmap);
989 goto small_bitmap;
990 case ARG_GEN_BIG:
991 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
992 mark_large_bitmap(queue, (StgClosure**)p, GET_FUN_LARGE_BITMAP(fun_info), size);
993 p += size;
994 break;
995 default:
996 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
997 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
998 small_bitmap:
999 mark_small_bitmap(queue, (StgClosure**)p, size, bitmap);
1000 p += size;
1001 break;
1002 }
1003 return p;
1004 }
1005
1006 static GNUC_ATTR_HOT void
1007 mark_stack_ (MarkQueue *queue, StgPtr sp, StgPtr spBottom)
1008 {
1009 ASSERT(sp <= spBottom);
1010
1011 while (sp < spBottom) {
1012 const StgRetInfoTable *info = get_ret_itbl((StgClosure *)sp);
1013 switch (info->i.type) {
1014 case UPDATE_FRAME:
1015 {
1016 // See Note [upd-black-hole] in rts/Scav.c
1017 StgUpdateFrame *frame = (StgUpdateFrame *) sp;
1018 markQueuePushClosure_(queue, frame->updatee);
1019 sp += sizeofW(StgUpdateFrame);
1020 continue;
1021 }
1022
1023 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1024 case CATCH_STM_FRAME:
1025 case CATCH_RETRY_FRAME:
1026 case ATOMICALLY_FRAME:
1027 case UNDERFLOW_FRAME:
1028 case STOP_FRAME:
1029 case CATCH_FRAME:
1030 case RET_SMALL:
1031 {
1032 StgWord bitmap = BITMAP_BITS(info->i.layout.bitmap);
1033 StgWord size = BITMAP_SIZE(info->i.layout.bitmap);
1034 // NOTE: the payload starts immediately after the info-ptr; we
1035 // don't have an StgHeader in the same sense as a heap closure.
1036 sp++;
1037 mark_small_bitmap(queue, (StgClosure **) sp, size, bitmap);
1038 sp += size;
1039 }
1040 follow_srt:
1041 if (info->i.srt) {
1042 markQueuePushClosure_(queue, (StgClosure*)GET_SRT(info));
1043 }
1044 continue;
1045
1046 case RET_BCO: {
1047 sp++;
1048 markQueuePushClosure_(queue, *(StgClosure**)sp);
1049 StgBCO *bco = (StgBCO *)*sp;
1050 sp++;
1051 StgWord size = BCO_BITMAP_SIZE(bco);
1052 mark_large_bitmap(queue, (StgClosure **) sp, BCO_BITMAP(bco), size);
1053 sp += size;
1054 continue;
1055 }
1056
1057 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1058 case RET_BIG:
1059 {
1060 StgWord size;
1061
1062 size = GET_LARGE_BITMAP(&info->i)->size;
1063 sp++;
1064 mark_large_bitmap(queue, (StgClosure **) sp, GET_LARGE_BITMAP(&info->i), size);
1065 sp += size;
1066 // and don't forget to follow the SRT
1067 goto follow_srt;
1068 }
1069
1070 case RET_FUN:
1071 {
1072 StgRetFun *ret_fun = (StgRetFun *)sp;
1073 const StgFunInfoTable *fun_info;
1074
1075 markQueuePushClosure_(queue, ret_fun->fun);
1076 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1077 sp = mark_arg_block(queue, fun_info, ret_fun->payload);
1078 goto follow_srt;
1079 }
1080
1081 default:
1082 barf("mark_stack: weird activation record found on stack: %d", (int)(info->i.type));
1083 }
1084 }
1085 }
1086
1087 static GNUC_ATTR_HOT void
1088 mark_stack (MarkQueue *queue, StgStack *stack)
1089 {
1090 // TODO: Clear dirty if contains only old gen objects
1091
1092 mark_stack_(queue, stack->sp, stack->stack + stack->stack_size);
1093 }
1094
1095 /* See Note [Static objects under the nonmoving collector].
1096 *
1097 * Returns true if the object needs to be marked.
1098 */
1099 static bool
1100 bump_static_flag(StgClosure **link_field, StgClosure *q STG_UNUSED)
1101 {
1102 while (1) {
1103 StgWord link = (StgWord) *link_field;
1104 StgWord new = (link & ~STATIC_BITS) | static_flag;
1105 if ((link & STATIC_BITS) == static_flag)
1106 return false;
1107 else if (cas((StgVolatilePtr) link_field, link, new) == link) {
1108 return true;
1109 }
1110 }
1111 }
1112
1113 static GNUC_ATTR_HOT void
1114 mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
1115 {
1116 StgClosure *p = (StgClosure*)p0;
1117
1118 try_again:
1119 ;
1120 bdescr *bd = NULL;
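// If we dereference an indirection below, p_next records its target so that
// marking can continue from the target (see the `goto try_again` at the end
// of this function).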
1121 StgClosure *p_next = NULL;
1122 StgWord tag = GET_CLOSURE_TAG(p);
1123 p = UNTAG_CLOSURE(p);
1124
1125 # define PUSH_FIELD(obj, field) \
1126 markQueuePushClosure(queue, \
1127 (StgClosure *) (obj)->field, \
1128 (StgClosure **) &(obj)->field)
1129
1130 if (!HEAP_ALLOCED_GC(p)) {
1131 const StgInfoTable *info = get_itbl(p);
1132 StgHalfWord type = info->type;
1133
1134 if (type == CONSTR_0_1 || type == CONSTR_0_2 || type == CONSTR_NOCAF) {
1135 // no need to put these on the static linked list, they don't need
1136 // to be marked.
1137 return;
1138 }
1139
1140 switch (type) {
1141
1142 case THUNK_STATIC:
1143 if (info->srt != 0) {
1144 if (bump_static_flag(THUNK_STATIC_LINK((StgClosure *)p), p)) {
1145 markQueuePushThunkSrt(queue, info); // TODO this function repeats the check above
1146 }
1147 }
1148 goto done;
1149
1150 case FUN_STATIC:
1151 if (info->srt != 0 || info->layout.payload.ptrs != 0) {
1152 if (bump_static_flag(STATIC_LINK(info, (StgClosure *)p), p)) {
1153 markQueuePushFunSrt(queue, info); // TODO this function repeats the check above
1154
1155 // a FUN_STATIC can also be an SRT, so it may have pointer
1156 // fields. See Note [SRTs] in CmmBuildInfoTables, specifically
1157 // the [FUN] optimisation.
1158 // TODO (osa) I don't understand this comment
1159 for (StgHalfWord i = 0; i < info->layout.payload.ptrs; ++i) {
1160 PUSH_FIELD(p, payload[i]);
1161 }
1162 }
1163 }
1164 goto done;
1165
1166 case IND_STATIC:
1167 if (bump_static_flag(IND_STATIC_LINK((StgClosure *)p), p)) {
1168 PUSH_FIELD((StgInd *) p, indirectee);
1169 }
1170 goto done;
1171
1172 case CONSTR:
1173 case CONSTR_1_0:
1174 case CONSTR_2_0:
1175 case CONSTR_1_1:
1176 if (bump_static_flag(STATIC_LINK(info, (StgClosure *)p), p)) {
1177 for (StgHalfWord i = 0; i < info->layout.payload.ptrs; ++i) {
1178 PUSH_FIELD(p, payload[i]);
1179 }
1180 }
1181 goto done;
1182
1183 case WHITEHOLE:
1184 while (get_volatile_itbl(p)->type == WHITEHOLE);
1185 // busy_wait_nop(); // FIXME
1186 goto try_again;
1187
1188 default:
1189 barf("mark_closure(static): strange closure type %d", (int)(info->type));
1190 }
1191 }
1192
1193 bd = Bdescr((StgPtr) p);
1194
1195 if (bd->gen != oldest_gen) {
1196 // Here we have an object living outside of the non-moving heap. While
1197 // we likely evacuated nearly everything to the nonmoving heap during
1198 // preparation there are nevertheless a few ways in which we might trace
1199 // a reference into younger generations:
1200 //
1201 // * a mutable object might have been updated
1202 // * we might have aged an object
1203 goto done;
1204 }
1205
1206 ASSERTM(LOOKS_LIKE_CLOSURE_PTR(p), "invalid closure, info=%p", p->header.info);
1207
1208 ASSERT(!IS_FORWARDING_PTR(p->header.info));
1209
1210 // N.B. only the first block of a compact region is guaranteed to carry
1211 // BF_NONMOVING; consequently we must separately check for BF_COMPACT.
1212 if (bd->flags & (BF_COMPACT | BF_NONMOVING)) {
1213
1214 if (bd->flags & BF_COMPACT) {
1215 StgCompactNFData *str = objectGetCompact((StgClosure*)p);
1216 bd = Bdescr((P_)str);
1217
1218 if (! (bd->flags & BF_NONMOVING_SWEEPING)) {
1219 // Not in the snapshot
1220 return;
1221 }
1222 if (bd->flags & BF_MARKED) {
1223 goto done;
1224 }
1225 } else if (bd->flags & BF_LARGE) {
1226 if (! (bd->flags & BF_NONMOVING_SWEEPING)) {
1227 // Not in the snapshot
1228 goto done;
1229 }
1230 if (bd->flags & BF_MARKED) {
1231 goto done;
1232 }
1233
1234 // Mark contents
1235 p = (StgClosure*)bd->start;
1236 } else {
1237 struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);
1238 nonmoving_block_idx block_idx = nonmovingGetBlockIdx((StgPtr) p);
1239
1240 /* We don't mark blocks that,
1241 * - were not live at the time that the snapshot was taken, or
1242 * - we have already marked this cycle
1243 */
1244 uint8_t mark = nonmovingGetMark(seg, block_idx);
1245 /* Don't mark things we've already marked (since we may loop) */
1246 if (mark == nonmovingMarkEpoch)
1247 goto done;
1248
1249 StgClosure *snapshot_loc =
1250 (StgClosure *) nonmovingSegmentGetBlock(seg, nonmovingSegmentInfo(seg)->next_free_snap);
1251 if (p >= snapshot_loc && mark == 0) {
1252 /*
1253 * In this case we are looking at a block that wasn't allocated
1254 * at the time that the snapshot was taken. We mustn't trace
1255 * things above the allocation pointer that aren't marked since
1256 * they may not be valid objects.
1257 */
1258 goto done;
1259 }
1260 }
1261 }
1262
1263 // A pinned object that is still attached to a capability (because it's not
1264 // filled yet). No need to trace it; pinned objects can't contain pointers.
1265 else if (bd->flags & BF_PINNED) {
1266 #if defined(DEBUG)
1267 bool found_it = false;
1268 for (uint32_t i = 0; i < n_capabilities; ++i) {
1269 if (capabilities[i]->pinned_object_block == bd) {
1270 found_it = true;
1271 break;
1272 }
1273 }
1274 ASSERT(found_it);
1275 #endif
1276 return; // we don't update origin here! TODO(osa): explain this
1277 }
1278
1279 else {
1280 barf("Strange closure in nonmoving mark: %p", p);
1281 }
1282
1283 /////////////////////////////////////////////////////
1284 // Trace pointers
1285 /////////////////////////////////////////////////////
1286
1287 const StgInfoTable *info = get_itbl(p);
1288 switch (info->type) {
1289
1290 case MVAR_CLEAN:
1291 case MVAR_DIRTY: {
1292 StgMVar *mvar = (StgMVar *) p;
1293 PUSH_FIELD(mvar, head);
1294 PUSH_FIELD(mvar, tail);
1295 PUSH_FIELD(mvar, value);
1296 break;
1297 }
1298
1299 case TVAR: {
1300 StgTVar *tvar = ((StgTVar *)p);
1301 PUSH_FIELD(tvar, current_value);
1302 PUSH_FIELD(tvar, first_watch_queue_entry);
1303 break;
1304 }
1305
1306 case FUN_2_0:
1307 markQueuePushFunSrt(queue, info);
1308 PUSH_FIELD(p, payload[1]);
1309 PUSH_FIELD(p, payload[0]);
1310 break;
1311
1312 case THUNK_2_0: {
1313 StgThunk *thunk = (StgThunk *) p;
1314 markQueuePushThunkSrt(queue, info);
1315 PUSH_FIELD(thunk, payload[1]);
1316 PUSH_FIELD(thunk, payload[0]);
1317 break;
1318 }
1319
1320 case CONSTR_2_0:
1321 PUSH_FIELD(p, payload[1]);
1322 PUSH_FIELD(p, payload[0]);
1323 break;
1324
1325 case THUNK_1_0:
1326 markQueuePushThunkSrt(queue, info);
1327 PUSH_FIELD((StgThunk *) p, payload[0]);
1328 break;
1329
1330 case FUN_1_0:
1331 markQueuePushFunSrt(queue, info);
1332 PUSH_FIELD(p, payload[0]);
1333 break;
1334
1335 case CONSTR_1_0:
1336 PUSH_FIELD(p, payload[0]);
1337 break;
1338
1339 case THUNK_0_1:
1340 markQueuePushThunkSrt(queue, info);
1341 break;
1342
1343 case FUN_0_1:
1344 markQueuePushFunSrt(queue, info);
1345 break;
1346
1347 case CONSTR_0_1:
1348 case CONSTR_0_2:
1349 break;
1350
1351 case THUNK_0_2:
1352 markQueuePushThunkSrt(queue, info);
1353 break;
1354
1355 case FUN_0_2:
1356 markQueuePushFunSrt(queue, info);
1357 break;
1358
1359 case THUNK_1_1:
1360 markQueuePushThunkSrt(queue, info);
1361 PUSH_FIELD((StgThunk *) p, payload[0]);
1362 break;
1363
1364 case FUN_1_1:
1365 markQueuePushFunSrt(queue, info);
1366 PUSH_FIELD(p, payload[0]);
1367 break;
1368
1369 case CONSTR_1_1:
1370 PUSH_FIELD(p, payload[0]);
1371 break;
1372
1373 case FUN:
1374 markQueuePushFunSrt(queue, info);
1375 goto gen_obj;
1376
1377 case THUNK: {
1378 markQueuePushThunkSrt(queue, info);
1379 for (StgWord i = 0; i < info->layout.payload.ptrs; i++) {
1380 StgClosure **field = &((StgThunk *) p)->payload[i];
1381 markQueuePushClosure(queue, *field, field);
1382 }
1383 break;
1384 }
1385
1386 gen_obj:
1387 case CONSTR:
1388 case CONSTR_NOCAF:
1389 case WEAK:
1390 case PRIM:
1391 {
1392 for (StgWord i = 0; i < info->layout.payload.ptrs; i++) {
1393 StgClosure **field = &((StgClosure *) p)->payload[i];
1394 markQueuePushClosure(queue, *field, field);
1395 }
1396 break;
1397 }
1398
1399 case BCO: {
1400 StgBCO *bco = (StgBCO *)p;
1401 PUSH_FIELD(bco, instrs);
1402 PUSH_FIELD(bco, literals);
1403 PUSH_FIELD(bco, ptrs);
1404 break;
1405 }
1406
1407
1408 case IND: {
1409 PUSH_FIELD((StgInd *) p, indirectee);
1410 if (origin != NULL) {
1411 p_next = ((StgInd*)p)->indirectee;
1412 }
1413 break;
1414 }
1415
1416 case BLACKHOLE: {
1417 PUSH_FIELD((StgInd *) p, indirectee);
1418 StgClosure *indirectee = ((StgInd*)p)->indirectee;
1419 if (GET_CLOSURE_TAG(indirectee) == 0 || origin == NULL) {
1420 // do nothing
1421 } else {
1422 p_next = indirectee;
1423 }
1424 break;
1425 }
1426
1427 case MUT_VAR_CLEAN:
1428 case MUT_VAR_DIRTY:
1429 PUSH_FIELD((StgMutVar *)p, var);
1430 break;
1431
1432 case BLOCKING_QUEUE: {
1433 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1434 PUSH_FIELD(bq, bh);
1435 PUSH_FIELD(bq, owner);
1436 PUSH_FIELD(bq, queue);
1437 PUSH_FIELD(bq, link);
1438 break;
1439 }
1440
1441 case THUNK_SELECTOR:
1442 if (RtsFlags.GcFlags.nonmovingSelectorOpt) {
1443 nonmoving_eval_thunk_selector(queue, (StgSelector*)p, origin);
1444 } else {
1445 PUSH_FIELD((StgSelector *) p, selectee);
1446 }
1447 break;
1448
1449 case AP_STACK: {
1450 StgAP_STACK *ap = (StgAP_STACK *)p;
1451 PUSH_FIELD(ap, fun);
1452 mark_stack_(queue, (StgPtr) ap->payload, (StgPtr) ap->payload + ap->size);
1453 break;
1454 }
1455
1456 case PAP: {
1457 StgPAP *pap = (StgPAP *) p;
1458 PUSH_FIELD(pap, fun);
1459 mark_PAP_payload(queue, pap->fun, pap->payload, pap->n_args);
1460 break;
1461 }
1462
1463 case AP: {
1464 StgAP *ap = (StgAP *) p;
1465 PUSH_FIELD(ap, fun);
1466 mark_PAP_payload(queue, ap->fun, ap->payload, ap->n_args);
1467 break;
1468 }
1469
1470 case ARR_WORDS:
1471 // nothing to follow
1472 break;
1473
1474 case MUT_ARR_PTRS_CLEAN:
1475 case MUT_ARR_PTRS_DIRTY:
1476 case MUT_ARR_PTRS_FROZEN_CLEAN:
1477 case MUT_ARR_PTRS_FROZEN_DIRTY:
1478 // TODO: Check this against Scav.c
1479 markQueuePushArray(queue, (StgMutArrPtrs *) p, 0);
1480 break;
1481
1482 case SMALL_MUT_ARR_PTRS_CLEAN:
1483 case SMALL_MUT_ARR_PTRS_DIRTY:
1484 case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
1485 case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY: {
1486 StgSmallMutArrPtrs *arr = (StgSmallMutArrPtrs *) p;
1487 for (StgWord i = 0; i < arr->ptrs; i++) {
1488 StgClosure **field = &arr->payload[i];
1489 markQueuePushClosure(queue, *field, field);
1490 }
1491 break;
1492 }
1493
1494 case TSO:
1495 mark_tso(queue, (StgTSO *) p);
1496 break;
1497
1498 case STACK: {
1499 // See Note [StgStack dirtiness flags and concurrent marking]
1500 StgStack *stack = (StgStack *) p;
1501 StgWord marking = stack->marking;
1502
1503 // N.B. stack->marking must be != nonmovingMarkEpoch unless
1504 // someone has already marked it.
1505 if (cas(&stack->marking, marking, nonmovingMarkEpoch)
1506 != nonmovingMarkEpoch) {
1507 // We have claimed the right to mark the stack.
1508 mark_stack(queue, stack);
1509 } else {
1510 // A mutator has already started marking the stack; we just let it
1511 // do its thing and move on. There's no reason to wait; we know that
1512 // the stack will be fully marked before we sweep due to the final
1513 // post-mark synchronization. Most importantly, we do not set its
1514 // mark bit, the mutator is responsible for this.
1515 goto done;
1516 }
1517 break;
1518 }
1519
1520 case MUT_PRIM: {
1521 for (StgHalfWord p_idx = 0; p_idx < info->layout.payload.ptrs; ++p_idx) {
1522 StgClosure **field = &p->payload[p_idx];
1523 markQueuePushClosure(queue, *field, field);
1524 }
1525 break;
1526 }
1527
1528 case TREC_CHUNK: {
1529 // TODO: Should we abort here? This should have already been marked
1530 // when we dirtied the TSO
1531 StgTRecChunk *tc = ((StgTRecChunk *) p);
1532 PUSH_FIELD(tc, prev_chunk);
1533 TRecEntry *end = &tc->entries[tc->next_entry_idx];
1534 for (TRecEntry *e = &tc->entries[0]; e < end; e++) {
1535 markQueuePushClosure_(queue, (StgClosure *) e->tvar);
1536 markQueuePushClosure_(queue, (StgClosure *) e->expected_value);
1537 markQueuePushClosure_(queue, (StgClosure *) e->new_value);
1538 }
1539 break;
1540 }
1541
1542 case WHITEHOLE:
1543 while (get_volatile_itbl(p)->type == WHITEHOLE);
1544 goto try_again;
1545
1546 case COMPACT_NFDATA:
1547 break;
1548
1549 default:
1550 barf("mark_closure: unimplemented/strange closure type %d @ %p",
1551 info->type, p);
1552 }
1553
1554 # undef PUSH_FIELD
1555
1556 /* Set the mark bit: it's important that we do this only after we actually push
1557 * the object's pointers since in the case of marking stacks there may be a
1558 * mutator waiting for us to finish so it can start execution.
1559 */
1560 if (bd->flags & BF_COMPACT) {
1561 StgCompactNFData *str = objectGetCompact((StgClosure*)p);
1562 dbl_link_remove(bd, &nonmoving_compact_objects);
1563 dbl_link_onto(bd, &nonmoving_marked_compact_objects);
1564 StgWord blocks = str->totalW / BLOCK_SIZE_W;
1565 n_nonmoving_compact_blocks -= blocks;
1566 n_nonmoving_marked_compact_blocks += blocks;
1567 bd->flags |= BF_MARKED;
1568 } else if (bd->flags & BF_LARGE) {
1569 /* Marking a large object isn't idempotent since we move it to
1570 * nonmoving_marked_large_objects; to ensure that we don't repeatedly
1571 * mark a large object, we only set BF_MARKED on large objects in the
1572 * nonmoving heap while holding nonmoving_large_objects_mutex
1573 */
1574 ACQUIRE_LOCK(&nonmoving_large_objects_mutex);
1575 if (! (bd->flags & BF_MARKED)) {
1576 // Remove the object from nonmoving_large_objects and link it to
1577 // nonmoving_marked_large_objects
1578 dbl_link_remove(bd, &nonmoving_large_objects);
1579 dbl_link_onto(bd, &nonmoving_marked_large_objects);
1580 n_nonmoving_large_blocks -= bd->blocks;
1581 n_nonmoving_marked_large_blocks += bd->blocks;
1582 bd->flags |= BF_MARKED;
1583 }
1584 RELEASE_LOCK(&nonmoving_large_objects_mutex);
1585 } else if (bd->flags & BF_NONMOVING) {
1586 // TODO: Kill repetition
1587 struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);
1588 nonmoving_block_idx block_idx = nonmovingGetBlockIdx((StgPtr) p);
1589 nonmovingSetMark(seg, block_idx);
1590 }
1591
1592 // If we found an indirection to shortcut, keep going.
1593 if (p_next) {
1594 p = p_next;
1595 goto try_again;
1596 }
1597
1598 done:
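// If we ended up at a different closure than the one we were asked to mark
// (e.g. because we followed an indirection above), try to short-circuit the
// origin field to point directly at the final object, preserving its tag.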
1599 if (origin != NULL && (!HEAP_ALLOCED(p) || bd->flags & BF_NONMOVING)) {
1600 if (UNTAG_CLOSURE((StgClosure*)p0) != p && *origin == p0) {
1601 if (cas((StgVolatilePtr)origin, (StgWord)p0, (StgWord)TAG_CLOSURE(tag, p)) == (StgWord)p0) {
1602 // debugBelch("Thunk optimization successful\n");
1603 }
1604 }
1605 }
1606 }
1607
1608 /* This is the main mark loop.
1609 * Invariants:
1610 *
1611 * a. nonmovingPrepareMark has been called.
1612 * b. the nursery has been fully evacuated into the non-moving generation.
1613 * c. the mark queue has been seeded with a set of roots.
1614 *
1615 */
1616 GNUC_ATTR_HOT void
1617 nonmovingMark (MarkQueue *queue)
1618 {
1619 traceConcMarkBegin();
1620 debugTrace(DEBUG_nonmoving_gc, "Starting mark pass");
1621 unsigned int count = 0;
1622 while (true) {
1623 count++;
1624 MarkQueueEnt ent = markQueuePop(queue);
1625
1626 switch (nonmovingMarkQueueEntryType(&ent)) {
1627 case MARK_CLOSURE:
1628 mark_closure(queue, ent.mark_closure.p, ent.mark_closure.origin);
1629 break;
1630 case MARK_ARRAY: {
1631 const StgMutArrPtrs *arr = ent.mark_array.array;
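// Recover the start index; the low 16 bits hold the 0x3 tag added by
// push_array and are shifted away here.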
1632 StgWord start = ent.mark_array.start_index >> 16;
1633 StgWord end = start + MARK_ARRAY_CHUNK_LENGTH;
1634 if (end < arr->ptrs) {
1635 markQueuePushArray(queue, ent.mark_array.array, end);
1636 } else {
1637 end = arr->ptrs;
1638 }
1639 for (StgWord i = start; i < end; i++) {
1640 markQueuePushClosure_(queue, arr->payload[i]);
1641 }
1642 break;
1643 }
1644 case NULL_ENTRY:
1645 // Perhaps the update remembered set has more to mark...
1646 if (upd_rem_set_block_list) {
1647 ACQUIRE_LOCK(&upd_rem_set_lock);
1648 bdescr *old = queue->blocks;
1649 queue->blocks = upd_rem_set_block_list;
1650 queue->top = (MarkQueueBlock *) queue->blocks->start;
1651 upd_rem_set_block_list = NULL;
1652 RELEASE_LOCK(&upd_rem_set_lock);
1653
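// Since we popped NULL_ENTRY the old queue chain consists of a single
// spent block, which we can now free.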
1654 ACQUIRE_SM_LOCK;
1655 freeGroup(old);
1656 RELEASE_SM_LOCK;
1657 } else {
1658 // Nothing more to do
1659 debugTrace(DEBUG_nonmoving_gc, "Finished mark pass: %d", count);
1660 traceConcMarkEnd(count);
1661 return;
1662 }
1663 }
1664 }
1665 }
1666
1667 // A variant of `isAlive` that works for non-moving heap. Used for:
1668 //
1669 // - Collecting weak pointers; checking key of a weak pointer.
1670 // - Resurrecting threads; checking if a thread is dead.
1671 // - Sweeping object lists: large_objects, mut_list, stable_name_table.
1672 //
1673 // This may only be used after a full mark but before nonmovingSweep as it
1674 // relies on the correctness of the next_free_snap and mark bitmaps.
1675 bool nonmovingIsAlive (StgClosure *p)
1676 {
1677 // Ignore static closures. See comments in `isAlive`.
1678 if (!HEAP_ALLOCED_GC(p)) {
1679 return true;
1680 }
1681
1682 bdescr *bd = Bdescr((P_)p);
1683
1684 // All non-static objects in the non-moving heap should be marked as
1685 // BF_NONMOVING
1686 ASSERT(bd->flags & BF_NONMOVING);
1687
1688 if (bd->flags & (BF_COMPACT | BF_LARGE)) {
1689 if (bd->flags & BF_COMPACT) {
1690 StgCompactNFData *str = objectGetCompact((StgClosure*)p);
1691 bd = Bdescr((P_)str);
1692 }
1693 return (bd->flags & BF_NONMOVING_SWEEPING) == 0
1694 // the large object wasn't in the snapshot and therefore wasn't marked
1695 || (bd->flags & BF_MARKED) != 0;
1696 // The object was marked
1697 } else {
1698 struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);
1699 nonmoving_block_idx i = nonmovingGetBlockIdx((StgPtr) p);
1700 uint8_t mark = nonmovingGetMark(seg, i);
1701 if (i >= nonmovingSegmentInfo(seg)->next_free_snap) {
1702 // If the object is allocated after next_free_snap then one of the
1703 // following must be true:
1704 //
1705 // * if its mark is 0 then the block was not allocated last time
1706 // the segment was swept; however, it may have been allocated since
1707 // then and therefore we must conclude that the block is alive.
1708 //
1709 // * if its mark is equal to nonmovingMarkEpoch then we found that
1710 // the object was alive in the snapshot of the current GC (recall
1711 // that this function may only be used after a mark).
1712 // Consequently we must conclude that the object is still alive.
1713 //
1714 // * if its mark is not equal to nonmovingMarkEpoch then we found
1715 // that the object was not reachable in the last snapshot.
1716 // Assuming that the mark is complete we can conclude that the
1717 // object is dead since the snapshot invariant guarantees that
1718 // all objects alive in the snapshot would be marked.
1719 //
1720 return mark == nonmovingMarkEpoch || mark == 0;
1721 } else {
1722 // If the object is below next_free_snap then the snapshot
1723 // invariant guarantees that it is marked if reachable.
1724 return mark == nonmovingMarkEpoch;
1725 }
1726 }
1727 }
1728
1729 // Check whether a snapshotted object is alive; that is, for an object known
1730 // to be in the snapshot, check that its mark bit is set. It is imperative that the
1731 // object is in the snapshot (e.g. was in the nonmoving heap at the time that
1732 // the snapshot was taken) since we assume that its mark bit reflects its
1733 // reachability.
1734 //
1735 // This is used when
1736 //
1737 // - Collecting weak pointers; checking key of a weak pointer.
1738 // - Resurrecting threads; checking if a thread is dead.
1739 // - Sweeping object lists: large_objects, mut_list, stable_name_table.
1740 //
1741 static bool nonmovingIsNowAlive (StgClosure *p)
1742 {
1743 // Ignore static closures. See comments in `isAlive`.
1744 if (!HEAP_ALLOCED_GC(p)) {
1745 return true;
1746 }
1747
1748 bdescr *bd = Bdescr((P_)p);
1749
1750 // All non-static objects in the non-moving heap should be marked as
1751 // BF_NONMOVING
1752 ASSERT(bd->flags & BF_NONMOVING);
1753
1754 if (bd->flags & BF_LARGE) {
1755 return (bd->flags & BF_NONMOVING_SWEEPING) == 0
1756 // the large object wasn't in the snapshot and therefore wasn't marked
1757 || (bd->flags & BF_MARKED) != 0;
1758 // The object was marked
1759 } else {
1760 return nonmovingClosureMarkedThisCycle((P_)p);
1761 }
1762 }
1763
1764 // Non-moving heap variant of `tidyWeakList`
1765 bool nonmovingTidyWeaks (struct MarkQueue_ *queue)
1766 {
1767 bool did_work = false;
1768
1769 StgWeak **last_w = &nonmoving_old_weak_ptr_list;
1770 StgWeak *next_w;
1771 for (StgWeak *w = nonmoving_old_weak_ptr_list; w != NULL; w = next_w) {
1772 if (w->header.info == &stg_DEAD_WEAK_info) {
1773 // finalizeWeak# was called on the weak
1774 next_w = w->link;
1775 *last_w = next_w;
1776 continue;
1777 }
1778
1779 // Otherwise it's a live weak
1780 ASSERT(w->header.info == &stg_WEAK_info);
1781
1782 if (nonmovingIsNowAlive(w->key)) {
1783 nonmovingMarkLiveWeak(queue, w);
1784 did_work = true;
1785
1786 // remove this weak ptr from old_weak_ptr list
1787 *last_w = w->link;
1788 next_w = w->link;
1789
1790 // and put it on the weak ptr list
1791 w->link = nonmoving_weak_ptr_list;
1792 nonmoving_weak_ptr_list = w;
1793 } else {
1794 last_w = &(w->link);
1795 next_w = w->link;
1796 }
1797 }
1798
1799 return did_work;
1800 }
1801
1802 void nonmovingMarkDeadWeak (struct MarkQueue_ *queue, StgWeak *w)
1803 {
1804 if (w->cfinalizers != &stg_NO_FINALIZER_closure) {
1805 markQueuePushClosure_(queue, w->value);
1806 }
1807 markQueuePushClosure_(queue, w->finalizer);
1808 }
1809
1810 void nonmovingMarkLiveWeak (struct MarkQueue_ *queue, StgWeak *w)
1811 {
1812 ASSERT(nonmovingClosureMarkedThisCycle((P_)w));
1813 markQueuePushClosure_(queue, w->value);
1814 markQueuePushClosure_(queue, w->finalizer);
1815 markQueuePushClosure_(queue, w->cfinalizers);
1816 }
1817
1818 // When we're done with marking, any weak pointers with non-marked keys will be
1819 // considered "dead". We mark values and finalizers of such weaks, and then
1820 // schedule them for finalization in `scheduleFinalizers` (which we run during
1821 // synchronization).
1822 void nonmovingMarkDeadWeaks (struct MarkQueue_ *queue, StgWeak **dead_weaks)
1823 {
1824 StgWeak *next_w;
1825 for (StgWeak *w = nonmoving_old_weak_ptr_list; w; w = next_w) {
1826 ASSERT(!nonmovingClosureMarkedThisCycle((P_)(w->key)));
1827 nonmovingMarkDeadWeak(queue, w);
1828 next_w = w->link;
1829 w->link = *dead_weaks;
1830 *dead_weaks = w;
1831 }
1832 }
1833
1834 // Non-moving heap variant of `tidyThreadList`
1835 void nonmovingTidyThreads ()
1836 {
1837 StgTSO *next;
1838 StgTSO **prev = &nonmoving_old_threads;
1839 for (StgTSO *t = nonmoving_old_threads; t != END_TSO_QUEUE; t = next) {
1840
1841 next = t->global_link;
1842
1843 // N.B. This thread is in old_threads, consequently we *know* it is in
1844 // the snapshot and it is therefore safe to rely on the bitmap to
1845 // determine its reachability.
1846 if (nonmovingIsNowAlive((StgClosure*)t)) {
1847 // alive
1848 *prev = next;
1849
1850 // move this thread onto threads list
1851 t->global_link = nonmoving_threads;
1852 nonmoving_threads = t;
1853 } else {
1854 // not alive (yet): leave this thread on the old_threads list
1855 prev = &(t->global_link);
1856 }
1857 }
1858 }
1859
1860 void nonmovingResurrectThreads (struct MarkQueue_ *queue, StgTSO **resurrected_threads)
1861 {
1862 StgTSO *next;
1863 for (StgTSO *t = nonmoving_old_threads; t != END_TSO_QUEUE; t = next) {
1864 next = t->global_link;
1865
1866 switch (t->what_next) {
1867 case ThreadKilled:
1868 case ThreadComplete:
1869 continue;
1870 default:
1871 markQueuePushClosure_(queue, (StgClosure*)t);
1872 t->global_link = *resurrected_threads;
1873 *resurrected_threads = t;
1874 }
1875 }
1876 }
1877
1878 #if defined(DEBUG)
1879
1880 void printMarkQueueEntry (MarkQueueEnt *ent)
1881 {
1882 switch(nonmovingMarkQueueEntryType(ent)) {
1883 case MARK_CLOSURE:
1884 debugBelch("Closure: ");
1885 printClosure(ent->mark_closure.p);
1886 break;
1887 case MARK_ARRAY:
1888 debugBelch("Array\n");
1889 break;
1890 case NULL_ENTRY:
1891 debugBelch("End of mark\n");
1892 break;
1893 }
1894 }
1895
1896 void printMarkQueue (MarkQueue *q)
1897 {
1898 debugBelch("======== MARK QUEUE ========\n");
1899 for (bdescr *block = q->blocks; block; block = block->link) {
1900 MarkQueueBlock *queue = (MarkQueueBlock*)block->start;
1901 for (uint32_t i = 0; i < queue->head; ++i) {
1902 printMarkQueueEntry(&queue->entries[i]);
1903 }
1904 }
1905 debugBelch("===== END OF MARK QUEUE ====\n");
1906 }
1907
1908 #endif