Eliminate atomic_inc_by and instead modify atomic_inc.
[ghc.git] / rts / Sparks.c
index a17b78c..4241656 100644
 
 #if defined(THREADED_RTS)
 
-void
-initSparkPools( void )
+SparkPool *
+allocSparkPool( void )
 {
-    /* walk over the capabilities, allocating a spark pool for each one */
-    nat i;
-    for (i = 0; i < n_capabilities; i++) {
-      capabilities[i].sparks = newWSDeque(RtsFlags.ParFlags.maxLocalSparks);
-    }
+    return newWSDeque(RtsFlags.ParFlags.maxLocalSparks);
 }
 
 void
@@ -45,9 +41,9 @@ createSparkThread (Capability *cap)
     StgTSO *tso;
 
     tso = createIOThread (cap, RtsFlags.GcFlags.initialStkSize, 
-                          &base_GHCziConc_runSparks_closure);
+                          (StgClosure *)runSparks_closure);
 
-    traceSchedEvent(cap, EVENT_CREATE_SPARK_THREAD, 0, tso->id);
+    traceEventCreateSparkThread(cap, tso->id);
 
     appendToRunQueue(cap,tso);
 }
@@ -63,47 +59,23 @@ newSpark (StgRegTable *reg, StgClosure *p)
     Capability *cap = regTableToCapability(reg);
     SparkPool *pool = cap->sparks;
 
-    /* I am not sure whether this is the right thing to do.
-     * Maybe it is better to exploit the tag information
-     * instead of throwing it away?
-     */
-    p = UNTAG_CLOSURE(p);
-
-    if (closure_SHOULD_SPARK(p)) {
-        pushWSDeque(pool,p);
-    }  
-
-    cap->sparks_created++;
+    if (!fizzledSpark(p)) {
+        if (pushWSDeque(pool,p)) {
+            cap->spark_stats.created++;
+            traceEventSparkCreate(cap);
+        } else {
+            /* overflowing the spark pool */
+            cap->spark_stats.overflowed++;
+            traceEventSparkOverflow(cap);
+        }
+    } else {
+        cap->spark_stats.dud++;
+        traceEventSparkDud(cap);
+    }
 
     return 1;
 }
 
-/* -----------------------------------------------------------------------------
- * 
- * tryStealSpark: try to steal a spark from a Capability.
- *
- * Returns a valid spark, or NULL if the pool was empty, and can
- * occasionally return NULL if there was a race with another thread
- * stealing from the same pool.  In this case, try again later.
- *
- -------------------------------------------------------------------------- */
-
-StgClosure *
-tryStealSpark (Capability *cap)
-{
-  SparkPool *pool = cap->sparks;
-  StgClosure *stolen;
-
-  do { 
-      stolen = stealWSDeque_(pool); 
-      // use the no-loopy version, stealWSDeque_(), since if we get a
-      // spurious NULL here the caller may want to try stealing from
-      // other pools before trying again.
-  } while (stolen != NULL && !closure_SHOULD_SPARK(stolen));
-
-  return stolen;
-}
-
 /* --------------------------------------------------------------------------
  * Remove all sparks from the spark queues which should not spark any
  * more.  Called after GC. We assume exclusive access to the structure
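
[Editor's note: the new accounting replaces the loose sparks_created / sparks_pruned counters on the Capability with a per-capability stats record, and each outcome now emits a matching trace event. The record itself is declared in a header not shown in this diff; judging from the fields these hunks touch, it presumably looks something like the sketch below.]

    /* Sketch of the per-capability spark statistics record assumed by this
     * diff (the real declaration lives in a header not shown here).  Each
     * field counts one possible fate of a spark. */
    typedef struct {
        StgWord created;     /* pushed onto the pool successfully        */
        StgWord dud;         /* already evaluated when newSpark saw it   */
        StgWord overflowed;  /* pool was full, spark dropped             */
        StgWord fizzled;     /* evaluated by someone else before running */
        StgWord gcd;         /* discarded because the closure was dead   */
    } SparkCounters;         /* embedded in Capability as spark_stats    */
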
@@ -112,7 +84,7 @@ tryStealSpark (Capability *cap)
  * -------------------------------------------------------------------------- */
 
 void
-pruneSparkQueue (evac_fn evac, void *user, Capability *cap)
+pruneSparkQueue (Capability *cap)
 { 
     SparkPool *pool;
     StgClosurePtr spark, tmp, *elements;
@@ -196,29 +168,65 @@ pruneSparkQueue (evac_fn evac, void *user, Capability *cap)
       // We have to be careful here: in the parallel GC, another
       // thread might evacuate this closure while we're looking at it,
       // so grab the info pointer just once.
-      info = spark->header.info;
-      if (IS_FORWARDING_PTR(info)) {
-          tmp = (StgClosure*)UN_FORWARDING_PTR(info);
-          /* if valuable work: shift inside the pool */
-          if (closure_SHOULD_SPARK(tmp)) {
-              elements[botInd] = tmp; // keep entry (new address)
-              botInd++;
-              n++;
-          } else {
-              pruned_sparks++; // discard spark
-              cap->sparks_pruned++;
-          }
+      if (GET_CLOSURE_TAG(spark) != 0) {
+          // Tagged pointer is a value, so the spark has fizzled.  It
+          // probably never happens that we get a tagged pointer in
+          // the spark pool, because we would have pruned the spark
+          // during the previous GC cycle if it turned out to be
+          // evaluated, but it doesn't hurt to have this check for
+          // robustness.
+          pruned_sparks++;
+          cap->spark_stats.fizzled++;
+          traceEventSparkFizzle(cap);
       } else {
-          if (!(closure_flags[INFO_PTR_TO_STRUCT(info)->type] & _NS)) {
-              elements[botInd] = spark; // keep entry (new address)
-              evac (user, &elements[botInd]);
-              botInd++;
-              n++;
+          info = spark->header.info;
+          if (IS_FORWARDING_PTR(info)) {
+              tmp = (StgClosure*)UN_FORWARDING_PTR(info);
+              /* if valuable work: shift inside the pool */
+              if (closure_SHOULD_SPARK(tmp)) {
+                  elements[botInd] = tmp; // keep entry (new address)
+                  botInd++;
+                  n++;
+              } else {
+                  pruned_sparks++; // discard spark
+                  cap->spark_stats.fizzled++;
+                  traceEventSparkFizzle(cap);
+              }
+          } else if (HEAP_ALLOCED(spark)) {
+              if ((Bdescr((P_)spark)->flags & BF_EVACUATED)) {
+                  if (closure_SHOULD_SPARK(spark)) {
+                      elements[botInd] = spark; // keep entry (new address)
+                      botInd++;
+                      n++;
+                  } else {
+                      pruned_sparks++; // discard spark
+                      cap->spark_stats.fizzled++;
+                      traceEventSparkFizzle(cap);
+                  }
+              } else {
+                  pruned_sparks++; // discard spark
+                  cap->spark_stats.gcd++;
+                  traceEventSparkGC(cap);
+              }
           } else {
-              pruned_sparks++; // discard spark
-              cap->sparks_pruned++;
+              if (INFO_PTR_TO_STRUCT(info)->type == THUNK_STATIC) {
+                  if (*THUNK_STATIC_LINK(spark) != NULL) {
+                      elements[botInd] = spark; // keep entry (new address)
+                      botInd++;
+                      n++;
+                  } else {
+                      pruned_sparks++; // discard spark
+                      cap->spark_stats.gcd++;
+                      traceEventSparkGC(cap);
+                  }
+              } else {
+                  pruned_sparks++; // discard spark
+                  cap->spark_stats.fizzled++;
+                  traceEventSparkFizzle(cap);
+              }
           }
       }
+
       currInd++;
 
       // in the loop, we may reach the bounds, and instantly wrap around
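
[Editor's note: the hunk is cut off here. The wrap-around the comment refers to is just the loop indices of the underlying ring buffer resetting to zero once they reach the pool size; a rough sketch, assuming the pool exposes its slot count as pool->size:]

    /* Sketch only: the remainder of the hunk is not shown.  currInd and
     * botInd index a ring buffer of pool->size slots, so once either
     * reaches the end it wraps back to slot 0. */
    if (currInd == pool->size) { currInd = 0; }
    if (botInd  == pool->size) { botInd  = 0; }
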