Add assertion of the invariant for the spark counters
author     Duncan Coutts <duncan@well-typed.com>
           Thu, 2 Jun 2011 16:28:56 +0000 (17:28 +0100)
committer  Duncan Coutts <duncan@well-typed.com>
           Mon, 18 Jul 2011 15:31:14 +0000 (16:31 +0100)
The invariant is: created = converted + remaining + gcd + fizzled
Since sparks move between capabilities, we have to aggregate the
counters over all capabilities. This in turn means we can only check
the invariant at stable points where all but one of the capabilities are
stopped. We can do this at shutdown time and before and after a global
synchronised GC.
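
For example (illustrative numbers only, not taken from a real run), with
the counters aggregated over all capabilities:

    created = 100, converted = 60, remaining = 10, gcd = 5, fizzled = 25
    100 == 60 + 10 + 5 + 25

The dud and overflowed counters are also aggregated by the check but do
not appear in the identity, since dud and overflowed sparks never enter
a spark pool in the first place.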

rts/Capability.c
rts/Capability.h
rts/Schedule.c

diff --git a/rts/Capability.c b/rts/Capability.c
index d93c9c1..410d3d0 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -843,6 +843,10 @@ shutdownCapabilities(Task *task, rtsBool safe)
         shutdownCapability(&capabilities[i], task, safe);
     }
     traceCapsetDelete(CAPSET_OSPROCESS_DEFAULT);
+
+#if defined(THREADED_RTS)
+    ASSERT(checkSparkCountInvariant());
+#endif
 }
 
 static void
@@ -913,3 +917,34 @@ markCapabilities (evac_fn evac, void *user)
         markCapability(evac, user, &capabilities[n], rtsFalse);
     }
 }
+
+#if defined(THREADED_RTS)
+rtsBool checkSparkCountInvariant (void)
+{
+    SparkCounters sparks = { 0, 0, 0, 0, 0, 0 };
+    StgWord64 remaining = 0;
+    nat i;
+
+    for (i = 0; i < n_capabilities; i++) {
+        sparks.created   += capabilities[i].spark_stats.created;
+        sparks.dud       += capabilities[i].spark_stats.dud;
+        sparks.overflowed+= capabilities[i].spark_stats.overflowed;
+        sparks.converted += capabilities[i].spark_stats.converted;
+        sparks.gcd       += capabilities[i].spark_stats.gcd;
+        sparks.fizzled   += capabilities[i].spark_stats.fizzled;
+        remaining        += sparkPoolSize(capabilities[i].sparks);
+    }
+    
+    /* The invariant is
+     *   created = converted + remaining + gcd + fizzled
+     */
+    debugTrace(DEBUG_sparks,"spark invariant: %ld == %ld + %ld + %ld + %ld "
+                            "(created == converted + remaining + gcd + fizzled)",
+                            sparks.created, sparks.converted, remaining,
+                            sparks.gcd, sparks.fizzled);
+
+    return (sparks.created ==
+              sparks.converted + remaining + sparks.gcd + sparks.fizzled);
+
+}
+#endif
diff --git a/rts/Capability.h b/rts/Capability.h
index 2037989..10c7c49 100644
--- a/rts/Capability.h
+++ b/rts/Capability.h
@@ -139,6 +139,10 @@ struct Capability_ {
   ASSERT(myTask() == task);                            \
   ASSERT_TASK_ID(task);
 
+#if defined(THREADED_RTS)
+rtsBool checkSparkCountInvariant (void);
+#endif
+
 // Converts a *StgRegTable into a *Capability.
 //
 INLINE_HEADER Capability *
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 125f9f0..5c94e20 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -1414,6 +1414,11 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
         // multi-threaded GC: make sure all the Capabilities donate one
         // GC thread each.
         waitForGcThreads(cap);
+        
+#if defined(THREADED_RTS)
+        // Stable point where we can do a global check on our spark counters
+        ASSERT(checkSparkCountInvariant());
+#endif
     }
 
 #endif
@@ -1461,6 +1466,11 @@ delete_threads_and_gc:
         recent_activity = ACTIVITY_YES;
     }
 
+#if defined(THREADED_RTS)
+    // Stable point where we can do a global check on our spark counters
+    ASSERT(checkSparkCountInvariant());
+#endif
+
     if (heap_census) {
         debugTrace(DEBUG_sched, "performing heap census");
         heapCensus();