Generalise `Control.Monad.{when,unless,guard}`
[ghc.git] / rts / RaiseAsync.c
index bebbcd4..7da3e64 100644
 #include "win32/IOManager.h"
 #endif
 
 #include "win32/IOManager.h"
 #endif
 
-static void raiseAsync (Capability *cap,
-                       StgTSO *tso,
-                       StgClosure *exception, 
-                       rtsBool stop_at_atomically,
-                       StgUpdateFrame *stop_here);
+static StgTSO* raiseAsync (Capability *cap,
+                           StgTSO *tso,
+                           StgClosure *exception,
+                           rtsBool stop_at_atomically,
+                           StgUpdateFrame *stop_here);
 
 static void removeFromQueues(Capability *cap, StgTSO *tso);
 
 static void removeFromMVarBlockedQueue (StgTSO *tso);
 
-static void blockedThrowTo (Capability *cap, 
-                            StgTSO *target, MessageThrowTo *msg);
-
 static void throwToSendMsg (Capability *cap USED_IF_THREADS,
                             Capability *target_cap USED_IF_THREADS, 
                             MessageThrowTo *msg USED_IF_THREADS);
@@ -57,43 +54,38 @@ static void throwToSendMsg (Capability *cap USED_IF_THREADS,
    has been raised.
    -------------------------------------------------------------------------- */
 
-void
-throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
-{
-    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
-}
-
-void
-throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception, 
-                      rtsBool stop_at_atomically)
+static void
+throwToSingleThreaded__ (Capability *cap, StgTSO *tso, StgClosure *exception, 
+                         rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
 {
-    tso = deRefTSO(tso);
-
     // Thread already dead?
     if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
-       return;
+        return;
     }
 
     // Remove it from any blocking queues
     removeFromQueues(cap,tso);
 
-    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
+    raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
 }
 
 void
-suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
+throwToSingleThreaded (Capability *cap, StgTSO *tso, StgClosure *exception)
 {
-    tso = deRefTSO(tso);
-
-    // Thread already dead?
-    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
-       return;
-    }
+    throwToSingleThreaded__(cap, tso, exception, rtsFalse, NULL);
+}
 
-    // Remove it from any blocking queues
-    removeFromQueues(cap,tso);
+void
+throwToSingleThreaded_ (Capability *cap, StgTSO *tso, StgClosure *exception,
+                        rtsBool stop_at_atomically)
+{
+    throwToSingleThreaded__ (cap, tso, exception, stop_at_atomically, NULL);
+}
 
-    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
+void // cannot return a different TSO
+suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
+{
+    throwToSingleThreaded__ (cap, tso, NULL, rtsFalse, stop_here);
 }
 
 /* -----------------------------------------------------------------------------
@@ -127,7 +119,7 @@ suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
    Capability, and it is
 
      - NotBlocked, BlockedOnMsgThrowTo,
-       BlockedOnCCall
+       BlockedOnCCall_Interruptible
 
     - or it is masking exceptions (TSO_BLOCKEX)
 
@@ -161,8 +153,7 @@ throwTo (Capability *cap,   // the Capability we hold
     MessageThrowTo *msg;
 
     msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
-    // message starts locked; the caller has to unlock it when it is
-    // ready.
+    // the message starts locked; see below
     SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
     msg->source      = source;
     msg->target      = target;
@@ -171,9 +162,16 @@ throwTo (Capability *cap,  // the Capability we hold
     switch (throwToMsg(cap, msg))
     {
     case THROWTO_SUCCESS:
+        // unlock the message now, otherwise we leave a WHITEHOLE in
+        // the heap (#6103)
+        SET_HDR(msg, &stg_MSG_THROWTO_info, CCS_SYSTEM);
         return NULL;
+
     case THROWTO_BLOCKED:
     default:
+        // the caller will unlock the message when it is ready.  We
+        // cannot unlock it yet, because the calling thread will need
+        // to tidy up its state first.
         return msg;
     }
 }
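Note: the two-sided unlock protocol above is easiest to see from the caller's
side. A minimal caller sketch, not code from this patch (the locals are
hypothetical; the field and info-table names follow the comments above):

    MessageThrowTo *m = throwTo(cap, source, target, exception);
    if (m != NULL) {
        // THROWTO_BLOCKED: the message is still a locked WHITEHOLE.
        // Tidy up the calling thread's own state first...
        source->why_blocked = BlockedOnMsgThrowTo;
        source->block_info.throwto = m;
        // ...then unlock, so the target and the GC see a valid closure.
        unlockClosure((StgClosure *)m, &stg_MSG_THROWTO_info);
    }
    // m == NULL means THROWTO_SUCCESS: throwTo unlocked the message
    // itself, so no WHITEHOLE is left in the heap (#6103).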
@@ -195,9 +193,6 @@ retry:
 check_target:
     ASSERT(target != END_TSO_QUEUE);
 
-    // follow ThreadRelocated links in the target first
-    target = deRefTSO(target);
-
     // Thread already dead?
     if (target->what_next == ThreadComplete 
        || target->what_next == ThreadKilled) {
@@ -289,13 +284,14 @@ check_target:
         }
 
         // nobody else can wake up this TSO after we claim the message
-        unlockClosure((StgClosure*)m, &stg_MSG_NULL_info);
+        doneWithMsgThrowTo(m);
 
         raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
         return THROWTO_SUCCESS;
     }
 
     case BlockedOnMVar:
+    case BlockedOnMVarRead:
     {
        /*
          To establish ownership of this TSO, we need to acquire a
@@ -308,7 +304,7 @@ check_target:
 
        // ASSUMPTION: tso->block_info must always point to a
        // closure.  In the threaded RTS it does.
-        switch (get_itbl(mvar)->type) {
+        switch (get_itbl((StgClosure *)mvar)->type) {
         case MVAR_CLEAN:
         case MVAR_DIRTY:
             break;
@@ -318,14 +314,9 @@ check_target:
 
        info = lockClosure((StgClosure *)mvar);
 
-       if (target->what_next == ThreadRelocated) {
-           target = target->_link;
-           unlockClosure((StgClosure *)mvar,info);
-           goto retry;
-       }
-       // we have the MVar, let's check whether the thread
+        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
-       if (target->why_blocked != BlockedOnMVar
+       if ((target->why_blocked != BlockedOnMVar && target->why_blocked != BlockedOnMVarRead)
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
@@ -357,14 +348,20 @@ check_target:
 
     case BlockedOnBlackHole:
     {
-        // Revoke the message by replacing it with IND. We're not
-        // locking anything here, so we might still get a TRY_WAKEUP
-        // message from the owner of the blackhole some time in the
-        // future, but that doesn't matter.
-        ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
-        OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
-        raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
-        return THROWTO_SUCCESS;
+       if (target->flags & TSO_BLOCKEX) {
+            // BlockedOnBlackHole is not interruptible.
+            blockedThrowTo(cap,target,msg);
+           return THROWTO_BLOCKED;
+       } else {
+            // Revoke the message by replacing it with IND. We're not
+            // locking anything here, so we might still get a TRY_WAKEUP
+            // message from the owner of the blackhole some time in the
+            // future, but that doesn't matter.
+            ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
+            OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
+            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
+            return THROWTO_SUCCESS;
+        }
     }
 
     case BlockedOnSTM:
@@ -386,8 +383,32 @@ check_target:
            return THROWTO_SUCCESS;
        }
 
+    case BlockedOnCCall_Interruptible:
+#ifdef THREADED_RTS
+    {
+        Task *task = NULL;
+        // walk suspended_ccalls to find the correct worker thread
+        InCall *incall;
+        for (incall = cap->suspended_ccalls; incall != NULL; incall = incall->next) {
+            if (incall->suspended_tso == target) {
+                task = incall->task;
+                break;
+            }
+        }
+        if (task != NULL) {
+            blockedThrowTo(cap, target, msg);
+            if (!((target->flags & TSO_BLOCKEX) &&
+                  ((target->flags & TSO_INTERRUPTIBLE) == 0))) {
+                interruptWorkerTask(task);
+            }
+            return THROWTO_BLOCKED;
+        } else {
+            debugTraceCap(DEBUG_sched, cap, "throwTo: could not find worker thread to kill");
+        }
+        // fall through to the plain BlockedOnCCall case below
+    }
+#endif
     case BlockedOnCCall:
-    case BlockedOnCCall_NoUnblockExc:
        blockedThrowTo(cap,target,msg);
        return THROWTO_BLOCKED;
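Note: the flag test in the BlockedOnCCall_Interruptible case above is the
standard masking check, restated here as a predicate for clarity (a sketch;
the helper name is made up):

    // A blocked thread may be interrupted unless it sits inside an
    // uninterruptible mask, i.e. TSO_BLOCKEX set with TSO_INTERRUPTIBLE
    // clear.  This is the same condition guarding interruptWorkerTask().
    static rtsBool canInterruptWorker (StgTSO *t)   // hypothetical helper
    {
        return !((t->flags & TSO_BLOCKEX) &&
                 ((t->flags & TSO_INTERRUPTIBLE) == 0));
    }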
 
@@ -409,8 +430,20 @@ check_target:
        }
 #endif
 
+    case ThreadMigrating:
+        // if it is ThreadMigrating and tso->cap is ours, then it
+        // *must* be migrating *to* this capability.  If it were
+        // migrating away from the capability, then tso->cap would
+        // point to the destination.
+        //
+        // There is a MSG_WAKEUP in the message queue for this thread,
+        // but we can just do it preemptively:
+        tryWakeupThread(cap, target);
+        // and now retry, the thread should be runnable.
+        goto retry;
+
     default:
-       barf("throwTo: unrecognised why_blocked value");
+        barf("throwTo: unrecognised why_blocked (%d)", target->why_blocked);
     }
     barf("throwTo");
 }
@@ -431,7 +464,7 @@ throwToSendMsg (Capability *cap STG_UNUSED,
 // Block a throwTo message on the target TSO's blocked_exceptions
 // queue.  The current Capability must own the target TSO in order to
 // modify the blocked_exceptions queue.
-static void
+void
 blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
 {
     debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
@@ -466,7 +499,8 @@ maybePerformBlockedException (Capability *cap, StgTSO *tso)
 {
     MessageThrowTo *msg;
     const StgInfoTable *i;
-    
+    StgTSO *source;
+
     if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
         if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
             awakenBlockedExceptionQueue(cap,tso);
@@ -498,8 +532,9 @@ maybePerformBlockedException (Capability *cap, StgTSO *tso)
         }
 
         throwToSingleThreaded(cap, msg->target, msg->exception);
-        unlockClosure((StgClosure*)msg,&stg_MSG_NULL_info);
-        tryWakeupThread(cap, msg->source);
+        source = msg->source;
+        doneWithMsgThrowTo(msg);
+        tryWakeupThread(cap, source);
         return 1;
     }
     return 0;
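Note: saving msg->source into a local before calling doneWithMsgThrowTo() is
an ordering fix, on the assumption that revoking the message (reverting it to
MSG_NULL) invalidates its payload:

    source = msg->source;         // read the payload while it is still valid
    doneWithMsgThrowTo(msg);      // revoke: msg is now MSG_NULL
    tryWakeupThread(cap, source); // must use the saved copy, not msg->source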
@@ -513,13 +548,15 @@ awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
 {
     MessageThrowTo *msg;
     const StgInfoTable *i;
+    StgTSO *source;
 
     for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
          msg = (MessageThrowTo*)msg->link) {
         i = lockClosure((StgClosure *)msg);
         if (i != &stg_MSG_NULL_info) {
-            unlockClosure((StgClosure *)msg,&stg_MSG_NULL_info);
-            tryWakeupThread(cap, msg->source);
+            source = msg->source;
+            doneWithMsgThrowTo(msg);
+            tryWakeupThread(cap, source);
         } else {
             unlockClosure((StgClosure *)msg,i);
         }
@@ -559,7 +596,7 @@ removeFromMVarBlockedQueue (StgTSO *tso)
 
     if (mvar->head == q) {
         mvar->head = q->link;
-        q->header.info = &stg_IND_info;
+        OVERWRITE_INFO(q, &stg_IND_info);
         if (mvar->tail == q) {
             mvar->tail = (StgMVarTSOQueue*)END_TSO_QUEUE;
         }
@@ -569,10 +606,10 @@ removeFromMVarBlockedQueue (StgTSO *tso)
         // we lose the tail pointer when the GC shorts out the IND.
         // So we use MSG_NULL as a kind of non-dupable indirection;
         // these are ignored by takeMVar/putMVar.
-        q->header.info = &stg_MSG_NULL_info;
+        OVERWRITE_INFO(q, &stg_MSG_NULL_info);
     }
     else {
-        q->header.info = &stg_IND_info;
+        OVERWRITE_INFO(q, &stg_IND_info);
     }
 
     // revoke the MVar operation
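Note: the three OVERWRITE_INFO cases above exist to keep the queue's tail
pointer valid. A sketch of the intent, assuming (per the comment) that the GC
may short out IND closures but leaves MSG_NULL alone:

    /* q is the head:        advance mvar->head; q becomes an IND
     *                       (and reset mvar->tail if q was also the tail)
     * q is the tail only:   q becomes MSG_NULL -- an IND here could be
     *                       shorted out by the GC, leaving mvar->tail
     *                       dangling; takeMVar/putMVar skip MSG_NULL cells
     * q is in the middle:   q becomes an IND; the GC shorts it out later
     */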
@@ -598,6 +635,7 @@ removeFromQueues(Capability *cap, StgTSO *tso)
     goto done;
 
   case BlockedOnMVar:
+  case BlockedOnMVarRead:
       removeFromMVarBlockedQueue(tso);
       goto done;
 
@@ -614,7 +652,7 @@ removeFromQueues(Capability *cap, StgTSO *tso)
       // ASSERT(m->header.info == &stg_WHITEHOLE_info);
 
       // unlock and revoke it at the same time
-      unlockClosure((StgClosure*)m,&stg_MSG_NULL_info);
+      doneWithMsgThrowTo(m);
       break;
   }
 
@@ -685,7 +723,7 @@ removeFromQueues(Capability *cap, StgTSO *tso)
  *
  * -------------------------------------------------------------------------- */
 
-static void
+static StgTSO *
 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception, 
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
 {
@@ -693,6 +731,7 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
     StgPtr sp, frame;
     StgClosure *updatee;
     nat i;
+    StgStack *stack;
 
     debugTraceCap(DEBUG_sched, cap,
                   "raising exception in thread %ld.", (long)tso->id);
 
     debugTraceCap(DEBUG_sched, cap,
                   "raising exception in thread %ld.", (long)tso->id);
@@ -703,30 +742,26 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
      * See also Exception.cmm:stg_raisezh.
      * This wasn't done for asynchronous exceptions originally; see #1450 
      */
-    if (RtsFlags.ProfFlags.showCCSOnException)
+    if (RtsFlags.ProfFlags.showCCSOnException && exception != NULL)
     {
-        fprintCCS_stderr(tso->prof.CCCS);
+        fprintCCS_stderr(tso->prof.cccs,exception,tso);
     }
 #endif
-    // ASSUMES: the thread is not already complete or dead, or
-    // ThreadRelocated.  Upper layers should deal with that.
+    // ASSUMES: the thread is not already complete or dead
+    // Upper layers should deal with that.
     ASSERT(tso->what_next != ThreadComplete && 
-           tso->what_next != ThreadKilled && 
-           tso->what_next != ThreadRelocated);
+           tso->what_next != ThreadKilled);
 
     // only if we own this TSO (except that deleteThread() calls this 
     ASSERT(tso->cap == cap);
 
-    // wake it up
-    if (tso->why_blocked != NotBlocked) {
-        tso->why_blocked = NotBlocked;
-        appendToRunQueue(cap,tso);
-    }        
+    stack = tso->stackobj;
 
     // mark it dirty; we're about to change its stack.
     dirty_TSO(cap, tso);
+    dirty_STACK(cap, stack);
 
-    sp = tso->sp;
+    sp = stack->sp;
     
     if (stop_here != NULL) {
         updatee = stop_here->updatee;
@@ -762,10 +797,13 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
        // 
        // 5. If it's a STOP_FRAME, then kill the thread.
         // 
-        // NB: if we pass an ATOMICALLY_FRAME then abort the associated 
+        // 6. If it's an UNDERFLOW_FRAME, then continue with the next
+        //    stack chunk.
+        //
+        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
         // transaction
        
-       info = get_ret_itbl((StgClosure *)frame);
+        info = get_ret_itbl((StgClosure *)frame);
 
        switch (info->i.type) {
 
@@ -790,7 +828,7 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
            
            SET_HDR(ap,&stg_AP_STACK_info,
                   ((StgClosure *)frame)->header.prof.ccs /* ToDo */); 
-           TICK_ALLOC_UP_THK(words+1,0);
+           TICK_ALLOC_UP_THK(WDS(words+1),0);
            
            //IF_DEBUG(scheduler,
           //       debugBelch("sched: Updating ");
@@ -820,12 +858,46 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
            continue; //no need to bump frame
        }
 
-       case STOP_FRAME:
+        case UNDERFLOW_FRAME:
+        {
+           StgAP_STACK * ap;
+           nat words;
+           
+           // First build an AP_STACK consisting of the stack chunk above the
+           // current update frame, with the top word on the stack as the
+           // fun field.
+           //
+           words = frame - sp - 1;
+           ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
+           
+           ap->size = words;
+           ap->fun  = (StgClosure *)sp[0];
+           sp++;
+           for(i=0; i < (nat)words; ++i) {
+               ap->payload[i] = (StgClosure *)*sp++;
+           }
+           
+            SET_HDR(ap,&stg_AP_STACK_NOUPD_info,
+                   ((StgClosure *)frame)->header.prof.ccs /* ToDo */); 
+            TICK_ALLOC_SE_THK(WDS(words+1),0);
+
+            stack->sp = sp;
+            threadStackUnderflow(cap,tso);
+            stack = tso->stackobj;
+            sp = stack->sp;
+
+            sp--;
+            sp[0] = (W_)ap;
+            frame = sp + 1;
+            continue;
+        }
+
+        case STOP_FRAME:
        {
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
-           tso->sp = frame + sizeofW(StgStopFrame);
-           return;
+            stack->sp = frame + sizeofW(StgStopFrame);
+            goto done;
        }
 
        case CATCH_FRAME:
@@ -834,9 +906,7 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
            // top of the CATCH_FRAME ready to enter.
            //
        {
-#ifdef PROFILING
            StgCatchFrame *cf = (StgCatchFrame *)frame;
-#endif
            StgThunk *raise;
            
            if (exception == NULL) break;
@@ -845,7 +915,7 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
            // handler in this frame.
            //
            raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
-           TICK_ALLOC_SE_THK(1,0);
+           TICK_ALLOC_SE_THK(WDS(1),0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;
            
@@ -853,28 +923,32 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
            //
            sp = frame - 1;
            
-           /* Ensure that async excpetions are blocked now, so we don't get
+           /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler.
             */
-           tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
+            tso->flags |= TSO_BLOCKEX;
+            if ((cf->exceptions_blocked & TSO_INTERRUPTIBLE) == 0) {
+                tso->flags &= ~TSO_INTERRUPTIBLE;
+            } else {
+                tso->flags |= TSO_INTERRUPTIBLE;
+            }
 
            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)raise;
            sp[-1] = (W_)&stg_enter_info;
-           tso->sp = sp-1;
+            stack->sp = sp-1;
            tso->what_next = ThreadRunGHC;
-           IF_DEBUG(sanity, checkTSO(tso));
-           return;
+            goto done;
        }
            
        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
-               tso->sp = frame - 2;
+                stack->sp = frame - 2;
                 // The ATOMICALLY_FRAME expects to be returned a
                 // result from the transaction, which it stores in the
                 // stack frame.  Hence we arrange to return a dummy
@@ -883,24 +957,69 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
                 // ATOMICALLY_FRAME instance for condemned
                 // transactions, but I don't fully understand the
                 // interaction with STM invariants.
-                tso->sp[1] = (W_)&stg_NO_TREC_closure;
-                tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
-               tso->what_next = ThreadRunGHC;
-               return;
+                stack->sp[1] = (W_)&stg_NO_TREC_closure;
+                stack->sp[0] = (W_)&stg_ret_p_info;
+                tso->what_next = ThreadRunGHC;
+                goto done;
            }
-           // Not stop_at_atomically... fall through and abort the
-           // transaction.
-           
-       case CATCH_STM_FRAME:
+            else
+            {
+                // Freezing an STM transaction.  Just aborting the
+                // transaction would be wrong; this is what we used to
+                // do, and it goes wrong if the ATOMICALLY_FRAME ever
+                // gets back onto the stack again, which it will do if
+                // the transaction is inside unsafePerformIO or
+                // unsafeInterleaveIO and hence inside an UPDATE_FRAME.
+                //
+                // So we want to make it so that if the enclosing
+                // computation is resumed, we will re-execute the
+                // transaction.  We therefore:
+                //
+                //   1. abort the current transaction
+                //   2. replace the stack up to and including the
+                //      atomically frame with a closure representing
+                //      a call to "atomically x", where x is the code
+                //      of the transaction.
+                //   3. continue stripping the stack
+                //
+                StgTRecHeader *trec = tso->trec;
+                StgTRecHeader *outer = trec->enclosing_trec;
+
+                StgThunk *atomically;
+                StgAtomicallyFrame *af = (StgAtomicallyFrame*)frame;
+
+                debugTraceCap(DEBUG_stm, cap,
+                              "raiseAsync: freezing atomically frame")
+                stmAbortTransaction(cap, trec);
+                stmFreeAbortedTRec(cap, trec);
+                tso->trec = outer;
+
+                atomically = (StgThunk*)allocate(cap,sizeofW(StgThunk)+1);
+                TICK_ALLOC_SE_THK(WDS(1),0);
+                SET_HDR(atomically,&stg_atomically_info,af->header.prof.ccs);
+                atomically->payload[0] = af->code;
+
+                // discard stack up to and including the ATOMICALLY_FRAME
+                frame += sizeofW(StgAtomicallyFrame);
+                sp = frame - 1;
+
+                // replace the ATOMICALLY_FRAME with call to atomically#
+                sp[0] = (W_)atomically;
+                continue;
+            }
+
+        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
-           // IF we find an ATOMICALLY_FRAME then we abort the
-           // current transaction and propagate the exception.  In
-           // this case (unlike ordinary exceptions) we do not care
+            // CATCH frames within an atomically block: abort the
+            // inner transaction and continue.  Eventually we will
+            // hit the outer transaction that will get frozen (see
+            // above).
+            //
+            // In this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.
-
-               {
+        {
             StgTRecHeader *trec = tso -> trec;
             StgTRecHeader *outer = trec -> enclosing_trec;
            debugTraceCap(DEBUG_stm, cap,
@@ -908,9 +1027,9 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
             stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);
             tso -> trec = outer;
-           break;
-           };
-           
+            break;
+        };
+
        default:
            break;
        }
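Note: the "freezing" of an ATOMICALLY_FRAME above is easiest to picture as a
stack transformation. A sketch (x stands for af->code, the transaction body):

    /* before:  ...enclosing frames... | ATOMICALLY_FRAME(x) | partial
     *          transaction frames (being stripped)
     * after:   ...enclosing frames... | thunk "atomically x"
     *
     * If the enclosing computation (e.g. an UPDATE_FRAME pushed by
     * unsafePerformIO) is ever resumed, it enters the thunk and re-runs
     * the transaction from the start, instead of returning into a dead
     * ATOMICALLY_FRAME.
     */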
@@ -919,8 +1038,24 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
        frame += stack_frame_sizeW((StgClosure *)frame);
     }
 
-    // if we got here, then we stopped at stop_here
-    ASSERT(stop_here != NULL);
+done:
+    IF_DEBUG(sanity, checkTSO(tso));
+
+    // wake it up
+    if (tso->why_blocked != NotBlocked) {
+        tso->why_blocked = NotBlocked;
+        appendToRunQueue(cap,tso);
+    }        
+
+    return tso;
 }
 
 
+
+// Local Variables:
+// mode: C
+// fill-column: 80
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// End: