rts: COMPACT_NFDATA support for the nonmoving collector
author     Ömer Sinan Ağacan <omeragacan@gmail.com>
           Thu, 23 May 2019 10:32:42 +0000 (13:32 +0300)
committer  Ben Gamari <ben@smart-cactus.org>
           Tue, 22 Oct 2019 22:56:32 +0000 (18:56 -0400)
This largely follows the model used for large objects, with appropriate
adjustments made to account for references in the sharing deduplication
hashtable.
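
In outline, the large-object model keeps two doubly-linked lists of block
descriptors: a snapshot list populated when the mark phase begins, and a
marked list that reachable objects migrate to exactly once; sweeping then
frees whatever never migrated. Below is a minimal sketch of how this patch
applies that model to compact regions, condensed from the diffs that follow.
It assumes the usual RTS environment (bdescr, Bdescr, dbl_link_remove/onto,
the BF_* flags, compactFree), and the helper names snapshot_compacts,
mark_compact and sweep_compacts are illustrative only, not part of the patch.

    /* Sketch only: condensed from the real changes below. */
    bdescr *nonmoving_compact_objects = NULL;        // snapshot, not yet marked
    bdescr *nonmoving_marked_compact_objects = NULL; // reachable this cycle

    // Mark-phase preparation: snapshot the oldest generation's compact
    // regions and clear their mark bits (cf. nonmovingPrepareMark).
    static void snapshot_compacts(void)
    {
        bdescr *next;
        for (bdescr *bd = oldest_gen->compact_objects; bd; bd = next) {
            next = bd->link;
            bd->flags |= BF_NONMOVING_SWEEPING;   // belongs to this snapshot
            bd->flags &= ~BF_MARKED;
            dbl_link_onto(bd, &nonmoving_compact_objects);
        }
        oldest_gen->compact_objects = NULL;
    }

    // Marking: only a region's first block carries the flags, so we find the
    // owning StgCompactNFData first; the list surgery happens at most once
    // per region (cf. mark_closure).
    static void mark_compact(StgCompactNFData *str)
    {
        bdescr *bd = Bdescr((P_)str);
        if (!(bd->flags & BF_NONMOVING_SWEEPING)) return; // not in the snapshot
        if (bd->flags & BF_MARKED) return;                // already marked
        dbl_link_remove(bd, &nonmoving_compact_objects);
        dbl_link_onto(bd, &nonmoving_marked_compact_objects);
        bd->flags |= BF_MARKED;
    }

    // Sweeping: anything still on the snapshot list is dead and is freed as
    // a whole region; the marked list becomes the next cycle's population
    // (cf. nonmovingSweepCompactObjects).
    static void sweep_compacts(void)
    {
        bdescr *next;
        for (bdescr *bd = nonmoving_compact_objects; bd; bd = next) {
            next = bd->link;
            compactFree(((StgCompactNFDataBlock*)bd->start)->owner);
        }
        nonmoving_compact_objects = nonmoving_marked_compact_objects;
        nonmoving_marked_compact_objects = NULL;
    }

The block counters (n_nonmoving_compact_blocks and its marked counterpart)
move in lockstep with these lists so that the Sanity.c checks can reconcile
them; the sharing-preservation hashtable is the one wrinkle large objects do
not have, handled by exporting scavenge_compact from Scav.c.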

rts/sm/CNF.c
rts/sm/Evac.c
rts/sm/NonMoving.c
rts/sm/NonMovingMark.c
rts/sm/NonMovingMark.h
rts/sm/NonMovingScav.c
rts/sm/NonMovingSweep.c
rts/sm/NonMovingSweep.h
rts/sm/Sanity.c
rts/sm/Scav.c
rts/sm/Scav.h

diff --git a/rts/sm/CNF.c b/rts/sm/CNF.c
index f85b390..87d1d84 100644
@@ -276,7 +276,10 @@ compactFree(StgCompactNFData *str)
     for ( ; block; block = next) {
         next = block->next;
         bd = Bdescr((StgPtr)block);
-        ASSERT((bd->flags & BF_EVACUATED) == 0);
+        ASSERT(RtsFlags.GcFlags.useNonmoving || ((bd->flags & BF_EVACUATED) == 0));
+            // When using the non-moving collector we leave compact objects
+            // evacuated to the oldest gen as BF_EVACUATED to avoid evacuating
+            // objects in the non-moving heap.
         freeGroup(bd);
     }
 }
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index f8ee263..5a9cb15 100644
@@ -436,12 +436,22 @@ evacuate_compact (StgPtr p)
     bd = Bdescr((StgPtr)str);
     gen_no = bd->gen_no;
 
+    if (bd->flags & BF_NONMOVING) {
+        // We may have evacuated the block to the nonmoving generation. If so
+        // we need to make sure it is added to the mark queue since the only
+        // reference to it may be from the moving heap.
+        if (major_gc && !deadlock_detect_gc)
+            markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) str);
+        return;
+    }
+
     // already evacuated? (we're about to do the same check,
     // but we avoid taking the spin-lock)
     if (bd->flags & BF_EVACUATED) {
         /* Don't forget to set the gct->failed_to_evac flag if we didn't get
          * the desired destination (see comments in evacuate()).
          */
+        debugTrace(DEBUG_compact, "Compact %p already evacuated", str);
         if (gen_no < gct->evac_gen_no) {
             gct->failed_to_evac = true;
             TICK_GC_FAILED_PROMOTION();
@@ -490,9 +500,15 @@ evacuate_compact (StgPtr p)
     // for that - the only code touching the generation of the block is
     // in the GC, and that should never see blocks other than the first)
     bd->flags |= BF_EVACUATED;
+    if (RtsFlags.GcFlags.useNonmoving && new_gen == oldest_gen) {
+        bd->flags |= BF_NONMOVING;
+    }
     initBdescr(bd, new_gen, new_gen->to);
 
     if (str->hash) {
+        // If there is a hash-table for sharing preservation then we need to add
+        // the compact to the scavenging work list to ensure that the hashtable
+        // is scavenged.
         gen_workspace *ws = &gct->gens[new_gen_no];
         bd->link = ws->todo_large_objects;
         ws->todo_large_objects = bd;
@@ -658,13 +674,6 @@ loop:
       // they are not)
       if (bd->flags & BF_COMPACT) {
           evacuate_compact((P_)q);
-
-          // We may have evacuated the block to the nonmoving generation. If so
-          // we need to make sure it is added to the mark queue since the only
-          // reference to it may be from the moving heap.
-          if (major_gc && bd->flags & BF_NONMOVING && !deadlock_detect_gc) {
-              markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, q);
-          }
           return;
       }
 
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index 8174fa7..9a6db9b 100644
@@ -750,6 +750,22 @@ static void nonmovingPrepareMark(void)
     oldest_gen->n_large_blocks = 0;
     nonmoving_live_words = 0;
 
+    // Move compact objects freshly promoted from younger generations onto nonmoving_compact_objects
+    for (bdescr *bd = oldest_gen->compact_objects; bd; bd = next) {
+        next = bd->link;
+        bd->flags |= BF_NONMOVING_SWEEPING;
+        dbl_link_onto(bd, &nonmoving_compact_objects);
+    }
+    n_nonmoving_compact_blocks += oldest_gen->n_compact_blocks;
+    oldest_gen->n_compact_blocks = 0;
+    oldest_gen->compact_objects = NULL;
+    // TODO (osa): what about "in import" stuff??
+
+    // Clear compact object mark bits
+    for (bdescr *bd = nonmoving_compact_objects; bd; bd = bd->link) {
+        bd->flags &= ~BF_MARKED;
+    }
+
     // Clear large object bits
     for (bdescr *bd = nonmoving_large_objects; bd; bd = bd->link) {
         bd->flags &= ~BF_MARKED;
@@ -810,6 +826,8 @@ void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads)
     // N.B. These should have been cleared at the end of the last sweep.
     ASSERT(nonmoving_marked_large_objects == NULL);
     ASSERT(n_nonmoving_marked_large_blocks == 0);
+    ASSERT(nonmoving_marked_compact_objects == NULL);
+    ASSERT(n_nonmoving_marked_compact_blocks == 0);
 
     MarkQueue *mark_queue = stgMallocBytes(sizeof(MarkQueue), "mark queue");
     initMarkQueue(mark_queue);
@@ -1055,6 +1073,7 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
     // collect them in a map in mark_queue and we pass it here to sweep large
     // objects
     nonmovingSweepLargeObjects();
+    nonmovingSweepCompactObjects();
     nonmovingSweepStableNameTable();
 
     nonmovingSweep();
diff --git a/rts/sm/NonMovingMark.c b/rts/sm/NonMovingMark.c
index 54398a7..03e3428 100644
@@ -24,6 +24,7 @@
 #include "STM.h"
 #include "MarkWeak.h"
 #include "sm/Storage.h"
+#include "CNF.h"
 
 static void mark_closure (MarkQueue *queue, const StgClosure *p, StgClosure **origin);
 static void mark_tso (MarkQueue *queue, StgTSO *tso);
@@ -68,6 +69,12 @@ bdescr *nonmoving_large_objects = NULL;
 bdescr *nonmoving_marked_large_objects = NULL;
 memcount n_nonmoving_large_blocks = 0;
 memcount n_nonmoving_marked_large_blocks = 0;
+
+bdescr *nonmoving_compact_objects = NULL;
+bdescr *nonmoving_marked_compact_objects = NULL;
+memcount n_nonmoving_compact_blocks = 0;
+memcount n_nonmoving_marked_compact_blocks = 0;
+
 #if defined(THREADED_RTS)
 /* Protects everything above. Furthermore, we only set the BF_MARKED bit of
  * large object blocks when this is held. This ensures that the write barrier
@@ -75,6 +82,9 @@ memcount n_nonmoving_marked_large_blocks = 0;
  * move the same large object to nonmoving_marked_large_objects more than once.
  */
 static Mutex nonmoving_large_objects_mutex;
+// Note that we don't need a similar lock for compact objects because we never
+// mark a compact object eagerly in a write barrier; all compact objects are
+// marked by the mark thread, so there can't be any races here.
 #endif
 
 /*
@@ -1246,9 +1256,22 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
 
     ASSERT(!IS_FORWARDING_PTR(p->header.info));
 
-    if (bd->flags & BF_NONMOVING) {
+    // N.B. only the first block of a compact region is guaranteed to carry
+    // BF_NONMOVING; consequently we must separately check for BF_COMPACT.
+    if (bd->flags & (BF_COMPACT | BF_NONMOVING)) {
 
-        if (bd->flags & BF_LARGE) {
+        if (bd->flags & BF_COMPACT) {
+            StgCompactNFData *str = objectGetCompact((StgClosure*)p);
+            bd = Bdescr((P_)str);
+
+            if (! (bd->flags & BF_NONMOVING_SWEEPING)) {
+                // Not in the snapshot
+                return;
+            }
+            if (bd->flags & BF_MARKED) {
+                goto done;
+            }
+        } else if (bd->flags & BF_LARGE) {
             if (! (bd->flags & BF_NONMOVING_SWEEPING)) {
                 // Not in the snapshot
                 goto done;
@@ -1569,6 +1592,9 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
         while (get_volatile_itbl(p)->type == WHITEHOLE);
         goto try_again;
 
+    case COMPACT_NFDATA:
+        break;
+
     default:
         barf("mark_closure: unimplemented/strange closure type %d @ %p",
              info->type, p);
@@ -1580,7 +1606,15 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
      * the object's pointers since in the case of marking stacks there may be a
      * mutator waiting for us to finish so it can start execution.
      */
-    if (bd->flags & BF_LARGE) {
+    if (bd->flags & BF_COMPACT) {
+        StgCompactNFData *str = objectGetCompact((StgClosure*)p);
+        dbl_link_remove(bd, &nonmoving_compact_objects);
+        dbl_link_onto(bd, &nonmoving_marked_compact_objects);
+        StgWord blocks = str->totalW / BLOCK_SIZE_W;
+        n_nonmoving_compact_blocks -= blocks;
+        n_nonmoving_marked_compact_blocks += blocks;
+        bd->flags |= BF_MARKED;
+    } else if (bd->flags & BF_LARGE) {
         /* Marking a large object isn't idempotent since we move it to
          * nonmoving_marked_large_objects; to ensure that we don't repeatedly
          * mark a large object, we only set BF_MARKED on large objects in the
@@ -1701,7 +1735,11 @@ bool nonmovingIsAlive (StgClosure *p)
     // BF_NONMOVING
     ASSERT(bd->flags & BF_NONMOVING);
 
-    if (bd->flags & BF_LARGE) {
+    if (bd->flags & (BF_COMPACT | BF_LARGE)) {
+        if (bd->flags & BF_COMPACT) {
+            StgCompactNFData *str = objectGetCompact((StgClosure*)p);
+            bd = Bdescr((P_)str);
+        }
         return (bd->flags & BF_NONMOVING_SWEEPING) == 0
                    // the large object wasn't in the snapshot and therefore wasn't marked
             || (bd->flags & BF_MARKED) != 0;
diff --git a/rts/sm/NonMovingMark.h b/rts/sm/NonMovingMark.h
index 806776c..fe150f4 100644
@@ -128,8 +128,10 @@ typedef struct {
 // The length of MarkQueueBlock.entries
 #define MARK_QUEUE_BLOCK_ENTRIES ((MARK_QUEUE_BLOCKS * BLOCK_SIZE - sizeof(MarkQueueBlock)) / sizeof(MarkQueueEnt))
 
-extern bdescr *nonmoving_large_objects, *nonmoving_marked_large_objects;
-extern memcount n_nonmoving_large_blocks, n_nonmoving_marked_large_blocks;
+extern bdescr *nonmoving_large_objects, *nonmoving_marked_large_objects,
+              *nonmoving_compact_objects, *nonmoving_marked_compact_objects;
+extern memcount n_nonmoving_large_blocks, n_nonmoving_marked_large_blocks,
+                n_nonmoving_compact_blocks, n_nonmoving_marked_compact_blocks;
 
 extern StgTSO *nonmoving_old_threads;
 extern StgWeak *nonmoving_old_weak_ptr_list;
diff --git a/rts/sm/NonMovingScav.c b/rts/sm/NonMovingScav.c
index 0efbde6..9583c7b 100644
@@ -337,6 +337,10 @@ nonmovingScavengeOne (StgClosure *q)
         evacuate(&((StgInd *)p)->indirectee);
         break;
 
+    case COMPACT_NFDATA:
+        scavenge_compact((StgCompactNFData*)p);
+        break;
+
     default:
         barf("nonmoving scavenge: unimplemented/strange closure type %d @ %p",
              info->type, p);
diff --git a/rts/sm/NonMovingSweep.c b/rts/sm/NonMovingSweep.c
index 3ee27ef..31a9041 100644
@@ -16,6 +16,7 @@
 #include "Storage.h"
 #include "Trace.h"
 #include "StableName.h"
+#include "CNF.h" // compactFree
 
 // On which list should a particular segment be placed?
 enum SweepResult {
@@ -301,6 +302,21 @@ void nonmovingSweepLargeObjects()
     n_nonmoving_marked_large_blocks = 0;
 }
 
+void nonmovingSweepCompactObjects()
+{
+    bdescr *next;
+    ACQUIRE_SM_LOCK;
+    for (bdescr *bd = nonmoving_compact_objects; bd; bd = next) {
+        next = bd->link;
+        compactFree(((StgCompactNFDataBlock*)bd->start)->owner);
+    }
+    RELEASE_SM_LOCK;
+    nonmoving_compact_objects = nonmoving_marked_compact_objects;
+    n_nonmoving_compact_blocks = n_nonmoving_marked_compact_blocks;
+    nonmoving_marked_compact_objects = NULL;
+    n_nonmoving_marked_compact_blocks = 0;
+}
+
 // Helper for nonmovingSweepStableNameTable. Essentially nonmovingIsAlive,
 // but works when the object died in moving heap, see
 // nonmovingSweepStableNameTable
diff --git a/rts/sm/NonMovingSweep.h b/rts/sm/NonMovingSweep.h
index 5ae5b68..24e9ecc 100644
@@ -19,6 +19,9 @@ void nonmovingSweepMutLists(void);
 // Remove unmarked entries in oldest generation scavenged_large_objects list
 void nonmovingSweepLargeObjects(void);
 
+// Remove unmarked entries in oldest generation compact_objects list
+void nonmovingSweepCompactObjects(void);
+
 // Remove dead entries in the stable name table
 void nonmovingSweepStableNameTable(void);
 
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index 99671e9..23f0fc5 100644
@@ -820,9 +820,26 @@ static void checkGeneration (generation *gen,
 #endif
 
     if (RtsFlags.GcFlags.useNonmoving && gen == oldest_gen) {
+        ASSERT(countNonMovingSegments(nonmovingHeap.free) == (W_) nonmovingHeap.n_free * NONMOVING_SEGMENT_BLOCKS);
         ASSERT(countBlocks(nonmoving_large_objects) == n_nonmoving_large_blocks);
         ASSERT(countBlocks(nonmoving_marked_large_objects) == n_nonmoving_marked_large_blocks);
-        ASSERT(countNonMovingSegments(nonmovingHeap.free) == (W_) nonmovingHeap.n_free * NONMOVING_SEGMENT_BLOCKS);
+
+        // Compact regions
+        // Accounting here is tricky due to the fact that the CNF allocation
+        // code modifies generation->n_compact_blocks directly. However, most
+        // objects being swept by the nonmoving GC are tracked in
+        // nonmoving_*_compact_objects. Consequently we can only maintain a very loose
+        // sanity invariant here.
+        uint32_t counted_cnf_blocks = 0;
+        counted_cnf_blocks += countCompactBlocks(nonmoving_marked_compact_objects);
+        counted_cnf_blocks += countCompactBlocks(nonmoving_compact_objects);
+        counted_cnf_blocks += countCompactBlocks(oldest_gen->compact_objects);
+
+        uint32_t total_cnf_blocks = 0;
+        total_cnf_blocks += n_nonmoving_compact_blocks + oldest_gen->n_compact_blocks;
+        total_cnf_blocks += n_nonmoving_marked_compact_blocks;
+
+        ASSERT(counted_cnf_blocks == total_cnf_blocks);
     }
 
     checkHeapChain(gen->blocks);
@@ -919,6 +936,8 @@ findMemoryLeak (void)
         markBlocks(upd_rem_set_block_list);
         markBlocks(nonmoving_large_objects);
         markBlocks(nonmoving_marked_large_objects);
+        markBlocks(nonmoving_compact_objects);
+        markBlocks(nonmoving_marked_compact_objects);
         for (i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
             struct NonmovingAllocator *alloc = nonmovingHeap.allocators[i];
             markNonMovingSegments(alloc->filled);
@@ -997,17 +1016,19 @@ genBlocks (generation *gen)
         ASSERT(countNonMovingHeap(&nonmovingHeap) == gen->n_blocks);
         ret += countAllocdBlocks(nonmoving_large_objects);
         ret += countAllocdBlocks(nonmoving_marked_large_objects);
+        ret += countAllocdCompactBlocks(nonmoving_compact_objects);
+        ret += countAllocdCompactBlocks(nonmoving_marked_compact_objects);
         ret += countNonMovingHeap(&nonmovingHeap);
         if (current_mark_queue)
             ret += countBlocks(current_mark_queue->blocks);
     } else {
         ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
+        ASSERT(countCompactBlocks(gen->compact_objects) == gen->n_compact_blocks);
+        ASSERT(countCompactBlocks(gen->compact_blocks_in_import) == gen->n_compact_blocks_in_import);
         ret += gen->n_blocks;
     }
 
     ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
-    ASSERT(countCompactBlocks(gen->compact_objects) == gen->n_compact_blocks);
-    ASSERT(countCompactBlocks(gen->compact_blocks_in_import) == gen->n_compact_blocks_in_import);
 
     ret += gen->n_old_blocks +
         countAllocdBlocks(gen->large_objects) +
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index cac9ca1..501d958 100644
@@ -84,6 +84,7 @@ static void scavenge_large_bitmap (StgPtr p,
 # define scavenge_mut_arr_ptrs(info) scavenge_mut_arr_ptrs1(info)
 # define scavenge_PAP(pap) scavenge_PAP1(pap)
 # define scavenge_AP(ap) scavenge_AP1(ap)
+# define scavenge_compact(str) scavenge_compact1(str)
 #endif
 
 static void do_evacuate(StgClosure **p, void *user STG_UNUSED)
@@ -173,7 +174,10 @@ evacuate_hash_entry(MapHashData *dat, StgWord key, const void *value)
     SET_GCT(old_gct);
 }
 
-static void
+/* Here we scavenge the sharing-preservation hash-table, which may contain keys
+ * living in from-space.
+ */
+void
 scavenge_compact(StgCompactNFData *str)
 {
     bool saved_eager;
diff --git a/rts/sm/Scav.h b/rts/sm/Scav.h
index 644aa32..94250bc 100644
@@ -24,6 +24,7 @@ void    scavenge_thunk_srt (const StgInfoTable *info);
 StgPtr  scavenge_mut_arr_ptrs (StgMutArrPtrs *a);
 StgPtr  scavenge_PAP (StgPAP *pap);
 StgPtr  scavenge_AP (StgAP *ap);
+void    scavenge_compact (StgCompactNFData *str);
 
 #if defined(THREADED_RTS)
 void    scavenge_loop1 (void);
@@ -35,6 +36,7 @@ void    scavenge_thunk_srt1 (const StgInfoTable *info);
 StgPtr  scavenge_mut_arr_ptrs1 (StgMutArrPtrs *a);
 StgPtr  scavenge_PAP1 (StgPAP *pap);
 StgPtr  scavenge_AP1 (StgAP *ap);
+void    scavenge_compact1 (StgCompactNFData *str);
 #endif
 
 #include "EndPrivate.h"