1 /* -*- tab-width: 8 -*- */
2 /* -----------------------------------------------------------------------------
4 * (c) The GHC Team, 1998-2012
6 * Out-of-line primitive operations
8 * This file contains the implementations of all the primitive
9 * operations ("primops") which are not expanded inline. See
10 * ghc/compiler/prelude/primops.txt.pp for a list of all the primops;
11 * this file contains code for most of those with the attribute out_of_line = True.
14 * Entry convention: the entry convention for a primop is the
15 * NativeNodeCall convention, and the return convention is
16 * NativeReturn. (see compiler/cmm/CmmCallConv.hs)
18 * This file is written in a subset of C--, extended with various
19 * features specific to GHC. It is compiled by GHC directly. For the
20 * syntax of .cmm files, see the parser in ghc/compiler/cmm/CmmParse.y.
22 * ---------------------------------------------------------------------------*/
27 import pthread_mutex_lock;
28 import pthread_mutex_unlock;
30 import base_ControlziExceptionziBase_nestedAtomically_closure;
31 import EnterCriticalSection;
32 import LeaveCriticalSection;
33 import ghczmprim_GHCziTypes_False_closure;
34 #if defined(USE_MINIINTERPRETER) || !defined(mingw32_HOST_OS)
38 /*-----------------------------------------------------------------------------
41 Basically just new*Array - the others are all inline macros.
43 The slow entry point is for returning from a heap check; the saved
44 size argument must be re-loaded from the stack.
45 -------------------------------------------------------------------------- */
47 /* for objects that are *less* than the size of a word, make sure we
48 * round up to the nearest word for the size of the array. */
51 stg_newByteArrayzh ( W_ n )
53 W_ words, payload_words;
56 MAYBE_GC_N(stg_newByteArrayzh, n);
58 payload_words = ROUNDUP_BYTES_TO_WDS(n);
59 words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
60 ("ptr" p) = ccall allocate(MyCapability() "ptr",words);
61 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
62 SET_HDR(p, stg_ARR_WORDS_info, CCCS);
63 StgArrWords_bytes(p) = n;
68 #define BA_MASK (BA_ALIGN-1)
70 stg_newPinnedByteArrayzh ( W_ n )
72 W_ words, bytes, payload_words;
75 MAYBE_GC_N(stg_newPinnedByteArrayzh, n);
78 /* payload_words is what we will tell the profiler we had to allocate */
79 payload_words = ROUNDUP_BYTES_TO_WDS(bytes);
80 /* When we actually allocate memory, we need to allow space for the header: */
82 bytes = bytes + SIZEOF_StgArrWords;
83 /* And we want to align to BA_ALIGN bytes, so we need to allow space
84 to shift up to BA_ALIGN - 1 bytes: */
85 bytes = bytes + BA_ALIGN - 1;
86 /* Now we convert to a number of words: */
87 words = ROUNDUP_BYTES_TO_WDS(bytes);
89 ("ptr" p) = ccall allocatePinned(MyCapability() "ptr", words);
90 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
92 /* Now we need to move p forward so that the payload is aligned to BA_ALIGN bytes: */
94 p = p + ((-p - SIZEOF_StgArrWords) & BA_MASK);
96 SET_HDR(p, stg_ARR_WORDS_info, CCCS);
97 StgArrWords_bytes(p) = n;
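/* Worked example of the alignment fix-up above (illustrative; assumes a
 * 64-bit build where BA_ALIGN is 16, hence BA_MASK is 15, and
 * SIZEOF_StgArrWords is 16 bytes):
 *
 *   allocatePinned returns p = 0x1008
 *   adjustment   = (-0x1008 - 16) & 15 = 8
 *   p becomes 0x1010, so the payload at p + SIZEOF_StgArrWords = 0x1020
 *   is BA_ALIGN-aligned.  At most BA_ALIGN - 1 bytes of slop are skipped,
 *   which is exactly what the "bytes + BA_ALIGN - 1" above allows for.
 */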
101 stg_newAlignedPinnedByteArrayzh ( W_ n, W_ alignment )
103 W_ words, bytes, payload_words;
106 again: MAYBE_GC(again);
108 /* we always supply at least word-aligned memory, so there's no
109 need to allow extra space for alignment if the requirement is less
110 than a word. This also prevents mischief with alignment == 0. */
111 if (alignment <= SIZEOF_W) { alignment = 1; }
115 /* payload_words is what we will tell the profiler we had to allocate */
116 payload_words = ROUNDUP_BYTES_TO_WDS(bytes);
118 /* When we actually allocate memory, we need to allow space for the header: */
120 bytes = bytes + SIZEOF_StgArrWords;
121 /* And we want to align to <alignment> bytes, so we need to allow space
122 to shift up to <alignment - 1> bytes: */
123 bytes = bytes + alignment - 1;
124 /* Now we convert to a number of words: */
125 words = ROUNDUP_BYTES_TO_WDS(bytes);
127 ("ptr" p) = ccall allocatePinned(MyCapability() "ptr", words);
128 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
130 /* Now we need to move p forward so that the payload is aligned
131 to <alignment> bytes. Note that we are assuming that
132 <alignment> is a power of 2, which is technically not guaranteed */
133 p = p + ((-p - SIZEOF_StgArrWords) & (alignment - 1));
135 SET_HDR(p, stg_ARR_WORDS_info, CCCS);
136 StgArrWords_bytes(p) = n;
140 // RRN: This one does not use the "ticketing" approach because it
141 // deals in unboxed scalars, not heap pointers.
142 stg_casIntArrayzh( gcptr arr, W_ ind, W_ old, W_ new )
143 /* MutableByteArray# s -> Int# -> Int# -> Int# -> State# s -> (# State# s, Int# #) */
147 p = arr + SIZEOF_StgArrWords + WDS(ind);
148 (h) = ccall cas(p, old, new);
154 stg_fetchAddIntArrayzh( gcptr arr, W_ ind, W_ incr )
155 /* MutableByteArray# s -> Int# -> Int# -> State# s -> (# State# s, Int# #) */
159 p = arr + SIZEOF_StgArrWords + WDS(ind);
160 (h) = ccall atomic_inc(p, incr);
166 stg_newArrayzh ( W_ n /* words */, gcptr init )
171 again: MAYBE_GC(again);
173 // the mark area contains one byte for each 2^MUT_ARR_PTRS_CARD_BITS words
174 // in the array, making sure we round up, and then rounding up to a whole number of words.
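// For example (illustrative figures: MUT_ARR_PTRS_CARD_BITS = 7 and 8-byte
// words): an array of n = 200 elements needs ceiling(200/128) = 2 mark bytes,
// which round up to 1 whole word, so size = n + mutArrPtrsCardWords(n) = 201.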
176 size = n + mutArrPtrsCardWords(n);
177 words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + size;
178 ("ptr" arr) = ccall allocate(MyCapability() "ptr",words);
179 TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(size), 0);
181 SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, CCCS);
182 StgMutArrPtrs_ptrs(arr) = n;
183 StgMutArrPtrs_size(arr) = size;
185 // Initialise all elements of the array with the value init
186 p = arr + SIZEOF_StgMutArrPtrs;
188 if (p < arr + SIZEOF_StgMutArrPtrs + WDS(n)) {
197 stg_unsafeThawArrayzh ( gcptr arr )
199 // SUBTLETY TO DO WITH THE OLD GEN MUTABLE LIST
201 // A MUT_ARR_PTRS lives on the mutable list, but a MUT_ARR_PTRS_FROZEN
202 // normally doesn't. However, when we freeze a MUT_ARR_PTRS, we leave
203 // it on the mutable list for the GC to remove (removing something from
204 // the mutable list is not easy).
206 // So that we can tell whether a MUT_ARR_PTRS_FROZEN is on the mutable list,
207 // when we freeze it we set the info ptr to be MUT_ARR_PTRS_FROZEN0
208 // to indicate that it is still on the mutable list.
210 // So, when we thaw a MUT_ARR_PTRS_FROZEN, we must cope with two cases:
211 // either it is on a mut_list, or it isn't. We adopt the convention that
212 // the closure type is MUT_ARR_PTRS_FROZEN0 if it is on the mutable list,
213 // and MUT_ARR_PTRS_FROZEN otherwise. In fact it wouldn't matter if
214 // we put it on the mutable list more than once, but it would get scavenged
215 // multiple times during GC, which would be unnecessarily slow.
217 if (StgHeader_info(arr) != stg_MUT_ARR_PTRS_FROZEN0_info) {
218 SET_INFO(arr,stg_MUT_ARR_PTRS_DIRTY_info);
220 // must be done after SET_INFO, because it ASSERTs closure_MUTABLE()
223 SET_INFO(arr,stg_MUT_ARR_PTRS_DIRTY_info);
228 stg_copyArrayzh ( gcptr src, W_ src_off, gcptr dst, W_ dst_off, W_ n )
230 copyArray(src, src_off, dst, dst_off, n)
233 stg_copyMutableArrayzh ( gcptr src, W_ src_off, gcptr dst, W_ dst_off, W_ n )
235 copyMutableArray(src, src_off, dst, dst_off, n)
238 stg_copyArrayArrayzh ( gcptr src, W_ src_off, gcptr dst, W_ dst_off, W_ n )
240 copyArray(src, src_off, dst, dst_off, n)
243 stg_copyMutableArrayArrayzh ( gcptr src, W_ src_off, gcptr dst, W_ dst_off, W_ n )
245 copyMutableArray(src, src_off, dst, dst_off, n)
248 stg_cloneArrayzh ( gcptr src, W_ offset, W_ n )
250 cloneArray(stg_MUT_ARR_PTRS_FROZEN_info, src, offset, n)
253 stg_cloneMutableArrayzh ( gcptr src, W_ offset, W_ n )
255 cloneArray(stg_MUT_ARR_PTRS_DIRTY_info, src, offset, n)
258 // We have to escape the "z" in the name.
259 stg_freezzeArrayzh ( gcptr src, W_ offset, W_ n )
261 cloneArray(stg_MUT_ARR_PTRS_FROZEN_info, src, offset, n)
264 stg_thawArrayzh ( gcptr src, W_ offset, W_ n )
266 cloneArray(stg_MUT_ARR_PTRS_DIRTY_info, src, offset, n)
269 // RRN: Uses the ticketed approach; see casMutVar
270 stg_casArrayzh ( gcptr arr, W_ ind, gcptr old, gcptr new )
271 /* MutableArray# s a -> Int# -> a -> a -> State# s -> (# State# s, Int#, Any a #) */
276 p = arr + SIZEOF_StgMutArrPtrs + WDS(ind);
277 (h) = ccall cas(p, old, new);
280 // Failure, return what was there instead of 'old':
283 // Compare and Swap Succeeded:
284 SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, CCCS);
285 len = StgMutArrPtrs_ptrs(arr);
286 // The write barrier. We must write a byte into the mark table:
287 I8[arr + SIZEOF_StgMutArrPtrs + WDS(len) + (ind >> MUT_ARR_PTRS_CARD_BITS )] = 1;
292 stg_newArrayArrayzh ( W_ n /* words */ )
297 MAYBE_GC_N(stg_newArrayArrayzh, n);
299 // the mark area contains one byte for each 2^MUT_ARR_PTRS_CARD_BITS words
300 // in the array, making sure we round up, and then rounding up to a whole number of words.
302 size = n + mutArrPtrsCardWords(n);
303 words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + size;
304 ("ptr" arr) = ccall allocate(MyCapability() "ptr",words);
305 TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(size), 0);
307 SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, W_[CCCS]);
308 StgMutArrPtrs_ptrs(arr) = n;
309 StgMutArrPtrs_size(arr) = size;
311 // Initialise all elements of the array with a pointer to the new array
312 p = arr + SIZEOF_StgMutArrPtrs;
314 if (p < arr + SIZEOF_StgMutArrPtrs + WDS(n)) {
324 /* -----------------------------------------------------------------------------
325 SmallArray primitives
326 -------------------------------------------------------------------------- */
328 stg_newSmallArrayzh ( W_ n /* words */, gcptr init )
333 again: MAYBE_GC(again);
335 words = BYTES_TO_WDS(SIZEOF_StgSmallMutArrPtrs) + n;
336 ("ptr" arr) = ccall allocate(MyCapability() "ptr",words);
337 TICK_ALLOC_PRIM(SIZEOF_StgSmallMutArrPtrs, WDS(n), 0);
339 SET_HDR(arr, stg_SMALL_MUT_ARR_PTRS_DIRTY_info, CCCS);
340 StgSmallMutArrPtrs_ptrs(arr) = n;
342 // Initialise all elements of the array with the value init
343 p = arr + SIZEOF_StgSmallMutArrPtrs;
345 if (p < arr + SIZEOF_StgSmallMutArrPtrs + WDS(n)) {
354 stg_unsafeThawSmallArrayzh ( gcptr arr )
356 // See stg_unsafeThawArrayzh
357 if (StgHeader_info(arr) != stg_SMALL_MUT_ARR_PTRS_FROZEN0_info) {
358 SET_INFO(arr, stg_SMALL_MUT_ARR_PTRS_DIRTY_info);
360 // must be done after SET_INFO, because it ASSERTs closure_MUTABLE()
363 SET_INFO(arr, stg_SMALL_MUT_ARR_PTRS_DIRTY_info);
368 stg_cloneSmallArrayzh ( gcptr src, W_ offset, W_ n )
370 cloneSmallArray(stg_SMALL_MUT_ARR_PTRS_FROZEN_info, src, offset, n)
373 stg_cloneSmallMutableArrayzh ( gcptr src, W_ offset, W_ n )
375 cloneSmallArray(stg_SMALL_MUT_ARR_PTRS_DIRTY_info, src, offset, n)
378 // We have to escape the "z" in the name.
379 stg_freezzeSmallArrayzh ( gcptr src, W_ offset, W_ n )
381 cloneSmallArray(stg_SMALL_MUT_ARR_PTRS_FROZEN_info, src, offset, n)
384 stg_thawSmallArrayzh ( gcptr src, W_ offset, W_ n )
386 cloneSmallArray(stg_SMALL_MUT_ARR_PTRS_DIRTY_info, src, offset, n)
389 stg_copySmallArrayzh ( gcptr src, W_ src_off, gcptr dst, W_ dst_off, W_ n)
391 W_ dst_p, src_p, bytes;
393 SET_INFO(dst, stg_SMALL_MUT_ARR_PTRS_DIRTY_info);
395 dst_p = dst + SIZEOF_StgSmallMutArrPtrs + WDS(dst_off);
396 src_p = src + SIZEOF_StgSmallMutArrPtrs + WDS(src_off);
398 prim %memcpy(dst_p, src_p, bytes, WDS(1));
403 stg_copySmallMutableArrayzh ( gcptr src, W_ src_off, gcptr dst, W_ dst_off, W_ n)
405 W_ dst_p, src_p, bytes;
407 SET_INFO(dst, stg_SMALL_MUT_ARR_PTRS_DIRTY_info);
409 dst_p = dst + SIZEOF_StgSmallMutArrPtrs + WDS(dst_off);
410 src_p = src + SIZEOF_StgSmallMutArrPtrs + WDS(src_off);
413 prim %memmove(dst_p, src_p, bytes, WDS(1));
415 prim %memcpy(dst_p, src_p, bytes, WDS(1));
421 // RRN: Uses the ticketed approach; see casMutVar
422 stg_casSmallArrayzh ( gcptr arr, W_ ind, gcptr old, gcptr new )
423 /* SmallMutableArray# s a -> Int# -> a -> a -> State# s -> (# State# s, Int#, Any a #) */
428 p = arr + SIZEOF_StgSmallMutArrPtrs + WDS(ind);
429 (h) = ccall cas(p, old, new);
432 // Failure, return what was there instead of 'old':
435 // Compare and Swap Succeeded:
436 SET_HDR(arr, stg_SMALL_MUT_ARR_PTRS_DIRTY_info, CCCS);
442 /* -----------------------------------------------------------------------------
444 -------------------------------------------------------------------------- */
446 stg_newMutVarzh ( gcptr init )
450 ALLOC_PRIM_P (SIZEOF_StgMutVar, stg_newMutVarzh, init);
452 mv = Hp - SIZEOF_StgMutVar + WDS(1);
453 SET_HDR(mv,stg_MUT_VAR_DIRTY_info,CCCS);
454 StgMutVar_var(mv) = init;
459 // RRN: To support the "ticketed" approach, we return the NEW rather
460 // than old value if the CAS is successful. This is received in an
461 // opaque form in the Haskell code, preventing the compiler from
462 // changing its pointer identity. The ticket can then be safely used
463 // in future CAS operations.
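/* A sketch (illustrative only, not the library implementation) of how the
 * ticket can be threaded through a retry loop on the Haskell side, assuming
 * the convention that the returned Int# is 0# on success:
 *
 *   {-# LANGUAGE MagicHash, UnboxedTuples #-}
 *   import GHC.Exts (MutVar#, RealWorld, casMutVar#)
 *   import GHC.IO (IO(..))
 *
 *   casModify :: MutVar# RealWorld a -> a -> (a -> a) -> IO a
 *   casModify mv ticket0 f = IO (go ticket0)
 *     where
 *       go old s0 =
 *         case casMutVar# mv old (f old) s0 of
 *           (# s1, 0#, new  #) -> (# s1, new #) -- success: 'new' is the fresh ticket
 *           (# s1, _,  seen #) -> go seen s1    -- failure: retry with what we saw
 */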
464 stg_casMutVarzh ( gcptr mv, gcptr old, gcptr new )
465 /* MutVar# s a -> a -> a -> State# s -> (# State#, Int#, Any a #) */
469 (h) = ccall cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var, old, new);
473 if (GET_INFO(mv) == stg_MUT_VAR_CLEAN_info) {
474 ccall dirty_MUT_VAR(BaseReg "ptr", mv "ptr");
480 stg_atomicModifyMutVarzh ( gcptr mv, gcptr f )
484 /* If x is the current contents of the MutVar#, then
485      We want to make the new contents point to (sel_0)(f x),
489      and the return value is (sel_1)(f x);
493      obviously we can share (f x).
495 z = [stg_ap_2 f x] (max (HS + 2) MIN_UPD_SIZE)
496 y = [stg_sel_0 z] (max (HS + 1) MIN_UPD_SIZE)
497 r = [stg_sel_1 z] (max (HS + 1) MIN_UPD_SIZE)
501 #define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
502 #define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),WDS(MIN_UPD_SIZE-1))
504 #define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(1))
505 #define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),0)
509 #define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
510 #define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),WDS(MIN_UPD_SIZE-2))
512 #define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(2))
513 #define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),0)
516 #define SIZE (THUNK_2_SIZE + THUNK_1_SIZE + THUNK_1_SIZE)
518 HP_CHK_GEN_TICKY(SIZE);
520 TICK_ALLOC_THUNK_2();
521 CCCS_ALLOC(THUNK_2_SIZE);
522 z = Hp - THUNK_2_SIZE + WDS(1);
523 SET_HDR(z, stg_ap_2_upd_info, CCCS);
524 LDV_RECORD_CREATE(z);
525 StgThunk_payload(z,0) = f;
527 TICK_ALLOC_THUNK_1();
528 CCCS_ALLOC(THUNK_1_SIZE);
529 y = z - THUNK_1_SIZE;
530 SET_HDR(y, stg_sel_0_upd_info, CCCS);
531 LDV_RECORD_CREATE(y);
532 StgThunk_payload(y,0) = z;
534 TICK_ALLOC_THUNK_1();
535 CCCS_ALLOC(THUNK_1_SIZE);
536 r = y - THUNK_1_SIZE;
537 SET_HDR(r, stg_sel_1_upd_info, CCCS);
538 LDV_RECORD_CREATE(r);
539 StgThunk_payload(r,0) = z;
542 x = StgMutVar_var(mv);
543 StgThunk_payload(z,1) = x;
545 (h) = ccall cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var, x, y);
546 if (h != x) { goto retry; }
548 StgMutVar_var(mv) = y;
551 if (GET_INFO(mv) == stg_MUT_VAR_CLEAN_info) {
552 ccall dirty_MUT_VAR(BaseReg "ptr", mv "ptr");
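/* For orientation, the thunk network built above corresponds roughly to the
 * following Haskell (illustrative sketch only; the real primop is
 * atomicModifyMutVar#, and z, y, r name the three thunks allocated above):
 *
 *   import Data.IORef
 *
 *   atomicModifySketch :: IORef a -> (a -> (a, b)) -> IO b
 *   atomicModifySketch ref f = do
 *     x <- readIORef ref       -- really the CAS loop above
 *     let z = f x              -- z = [stg_ap_2 f x], shared
 *     writeIORef ref (fst z)   -- y = [stg_sel_0 z], the new contents
 *     return (snd z)           -- r = [stg_sel_1 z], the value returned
 */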
558 /* -----------------------------------------------------------------------------
559 Weak Pointer Primitives
560 -------------------------------------------------------------------------- */
562 STRING(stg_weak_msg,"New weak pointer at %p\n")
564 stg_mkWeakzh ( gcptr key,
566 gcptr finalizer /* or stg_NO_FINALIZER_closure */ )
570 ALLOC_PRIM (SIZEOF_StgWeak)
572 w = Hp - SIZEOF_StgWeak + WDS(1);
573 SET_HDR(w, stg_WEAK_info, CCCS);
575 StgWeak_key(w) = key;
576 StgWeak_value(w) = value;
577 StgWeak_finalizer(w) = finalizer;
578 StgWeak_cfinalizers(w) = stg_NO_FINALIZER_closure;
580 ACQUIRE_LOCK(sm_mutex);
581 StgWeak_link(w) = generation_weak_ptr_list(W_[g0]);
582 generation_weak_ptr_list(W_[g0]) = w;
583 RELEASE_LOCK(sm_mutex);
585 IF_DEBUG(weak, ccall debugBelch(stg_weak_msg,w));
590 stg_mkWeakNoFinalizzerzh ( gcptr key, gcptr value )
592 jump stg_mkWeakzh (key, value, stg_NO_FINALIZER_closure);
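/* A small usage sketch from the Haskell side (illustrative only): mkWeak
 * builds the StgWeak laid out above, deRefWeak corresponds to stg_deRefWeakzh
 * and finalize to stg_finalizzeWeakzh below.
 *
 *   import System.Mem.Weak
 *
 *   demo :: IO ()
 *   demo = do
 *     let key = [1 .. 10 :: Int]
 *     w <- mkWeak key 'v' (Just (putStrLn "finalised"))
 *     deRefWeak w >>= print    -- Just 'v' while key is still reachable
 *     finalize w               -- run the finaliser; the Weak becomes DEAD_WEAK
 *     deRefWeak w >>= print    -- Nothing
 */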
595 STRING(stg_cfinalizer_msg,"Adding a finalizer to %p\n")
597 stg_addCFinalizzerToWeakzh ( W_ fptr, // finalizer
599 W_ flag, // has environment (0 or 1)
605 ALLOC_PRIM (SIZEOF_StgCFinalizerList)
607 c = Hp - SIZEOF_StgCFinalizerList + WDS(1);
608 SET_HDR(c, stg_C_FINALIZER_LIST_info, CCCS);
610 StgCFinalizerList_fptr(c) = fptr;
611 StgCFinalizerList_ptr(c) = ptr;
612 StgCFinalizerList_eptr(c) = eptr;
613 StgCFinalizerList_flag(c) = flag;
615 LOCK_CLOSURE(w, info);
617 if (info == stg_DEAD_WEAK_info) {
619 unlockClosure(w, info);
623 StgCFinalizerList_link(c) = StgWeak_cfinalizers(w);
624 StgWeak_cfinalizers(w) = c;
626 unlockClosure(w, info);
630 IF_DEBUG(weak, ccall debugBelch(stg_cfinalizer_msg,w));
635 stg_finalizzeWeakzh ( gcptr w )
640 LOCK_CLOSURE(w, info);
643 if (info == stg_DEAD_WEAK_info) {
644 unlockClosure(w, info);
645 return (0,stg_NO_FINALIZER_closure);
648 f = StgWeak_finalizer(w);
649 list = StgWeak_cfinalizers(w);
654 // A weak pointer is inherently used, so we do not need to call
655 // LDV_recordDead_FILL_SLOP_DYNAMIC():
656 // LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)w);
657 // or, LDV_recordDead():
658 // LDV_recordDead((StgClosure *)w, sizeofW(StgWeak) - sizeofW(StgProfHeader));
659 // Furthermore, when PROFILING is turned on, dead weak pointers are exactly as
660 // large as weak pointers, so there is no need to fill the slop, either.
661 // See stg_DEAD_WEAK_info in StgMiscClosures.hc.
665 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
667 unlockClosure(w, stg_DEAD_WEAK_info);
669 LDV_RECORD_CREATE(w);
671 if (list != stg_NO_FINALIZER_closure) {
672 ccall runCFinalizers(list);
675 /* return the finalizer */
676 if (f == stg_NO_FINALIZER_closure) {
677 return (0,stg_NO_FINALIZER_closure);
683 stg_deRefWeakzh ( gcptr w )
690 if (info == stg_WHITEHOLE_info) {
691 // w is locked by another thread. Now it's not immediately clear if w is
692 // alive or not. We use lockClosure to wait for the info pointer to become
693 // something other than stg_WHITEHOLE_info.
695 LOCK_CLOSURE(w, info);
696 unlockClosure(w, info);
699 if (info == stg_WEAK_info) {
701 val = StgWeak_value(w);
709 /* -----------------------------------------------------------------------------
710 Floating point operations.
711 -------------------------------------------------------------------------- */
713 stg_decodeFloatzuIntzh ( F_ arg )
716 W_ tmp, mp_tmp1, mp_tmp_w, r1, r2;
718 STK_CHK_GEN_N (WDS(2));
722 mp_tmp1 = tmp + WDS(1);
725 /* Perform the operation */
726 ccall __decodeFloat_Int(mp_tmp1 "ptr", mp_tmp_w "ptr", arg);
732 /* returns: (Int# (mantissa), Int# (exponent)) */
736 stg_decodeDoublezu2Intzh ( D_ arg )
739 W_ mp_tmp1, mp_tmp2, mp_result1, mp_result2;
742 STK_CHK_GEN_N (WDS(4));
746 mp_tmp1 = tmp + WDS(3);
747 mp_tmp2 = tmp + WDS(2);
748 mp_result1 = tmp + WDS(1);
751 /* Perform the operation */
752 ccall __decodeDouble_2Int(mp_tmp1 "ptr", mp_tmp2 "ptr",
753 mp_result1 "ptr", mp_result2 "ptr",
763 (Int# (mant sign), Word# (mant high), Word# (mant low), Int# (expn)) */
764 return (r1, r2, r3, r4);
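/* These back the RealFloat decodeFloat method at the Haskell level.  A worked
 * example for the IEEE single- and double-precision representations:
 *
 *   decodeFloat (1.0 :: Float)  == (8388608, -23)           -- 2^23 * 2^-23
 *   decodeFloat (1.0 :: Double) == (4503599627370496, -52)  -- 2^52 * 2^-52
 *
 * i.e. the mantissa is returned as an integer scaled so that
 * mantissa * 2^exponent reconstructs the argument.
 */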
767 /* -----------------------------------------------------------------------------
768 * Concurrency primitives
769 * -------------------------------------------------------------------------- */
771 stg_forkzh ( gcptr closure )
773 MAYBE_GC_P(stg_forkzh, closure);
777 ("ptr" threadid) = ccall createIOThread( MyCapability() "ptr",
778 RtsFlags_GcFlags_initialStkSize(RtsFlags),
781 /* start blocked if the current thread is blocked */
782 StgTSO_flags(threadid) = %lobits16(
783 TO_W_(StgTSO_flags(threadid)) |
784 TO_W_(StgTSO_flags(CurrentTSO)) & (TSO_BLOCKEX | TSO_INTERRUPTIBLE));
786 ccall scheduleThread(MyCapability() "ptr", threadid "ptr");
788 // context switch soon, but not immediately: we don't want every
789 // forkIO to force a context-switch.
790 Capability_context_switch(MyCapability()) = 1 :: CInt;
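/* The flag inheritance above is what makes a forked thread start in its
 * parent's masking state.  Illustrative Haskell example:
 *
 *   import Control.Concurrent
 *   import Control.Exception
 *
 *   main :: IO ()
 *   main = mask_ $ do
 *     _ <- forkIO (getMaskingState >>= print)  -- prints MaskedInterruptible
 *     threadDelay 100000
 */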
795 stg_forkOnzh ( W_ cpu, gcptr closure )
797 again: MAYBE_GC(again);
801 ("ptr" threadid) = ccall createIOThread(
802 MyCapability() "ptr",
803 RtsFlags_GcFlags_initialStkSize(RtsFlags),
806 /* start blocked if the current thread is blocked */
807 StgTSO_flags(threadid) = %lobits16(
808 TO_W_(StgTSO_flags(threadid)) |
809 TO_W_(StgTSO_flags(CurrentTSO)) & (TSO_BLOCKEX | TSO_INTERRUPTIBLE));
811 ccall scheduleThreadOn(MyCapability() "ptr", cpu, threadid "ptr");
813 // context switch soon, but not immediately: we don't want every
814 // forkIO to force a context-switch.
815 Capability_context_switch(MyCapability()) = 1 :: CInt;
822 // when we yield to the scheduler, we have to tell it to put the
823 // current thread to the back of the queue by setting the
824 // context_switch flag. If we don't do this, it will run the same thread again.
826 Capability_context_switch(MyCapability()) = 1 :: CInt;
827 jump stg_yield_noregs();
835 stg_labelThreadzh ( gcptr threadid, W_ addr )
837 #if defined(DEBUG) || defined(TRACING) || defined(DTRACE)
838 ccall labelThread(MyCapability() "ptr", threadid "ptr", addr "ptr");
843 stg_isCurrentThreadBoundzh (/* no args */)
846 (r) = ccall isThreadBound(CurrentTSO);
850 stg_threadStatuszh ( gcptr tso )
856 what_next = TO_W_(StgTSO_what_next(tso));
857 why_blocked = TO_W_(StgTSO_why_blocked(tso));
858 // Note: these two reads are not atomic, so they might end up
859 // being inconsistent. It doesn't matter, since we
860 // only return one or the other. If we wanted to return the
861 // contents of block_info too, then we'd have to do some synchronisation.
863 if (what_next == ThreadComplete) {
864 ret = 16; // NB. magic, matches up with GHC.Conc.threadStatus
866 if (what_next == ThreadKilled) {
873 cap = TO_W_(Capability_no(StgTSO_cap(tso)));
875 if ((TO_W_(StgTSO_flags(tso)) & TSO_LOCKED) != 0) {
881 return (ret,cap,locked);
884 /* -----------------------------------------------------------------------------
886 * -------------------------------------------------------------------------- */
888 // Catch retry frame -----------------------------------------------------------
890 #define CATCH_RETRY_FRAME_FIELDS(w_,p_,info_ptr, \
896 PROF_HDR_FIELDS(w_,p1,p2) \
897 w_ running_alt_code, \
902 INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
903 CATCH_RETRY_FRAME_FIELDS(W_,P_,
911 gcptr trec, outer, arg;
913 trec = StgTSO_trec(CurrentTSO);
914 outer = StgTRecHeader_enclosing_trec(trec);
915 (r) = ccall stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr");
917 // Succeeded (either first branch or second branch)
918 StgTSO_trec(CurrentTSO) = outer;
921 // Did not commit: re-execute
923 ("ptr" new_trec) = ccall stmStartTransaction(MyCapability() "ptr",
925 StgTSO_trec(CurrentTSO) = new_trec;
926 if (running_alt_code != 0) {
928 (CATCH_RETRY_FRAME_FIELDS(,,info_ptr, p1, p2,
935 (CATCH_RETRY_FRAME_FIELDS(,,info_ptr, p1, p2,
944 // Atomically frame ------------------------------------------------------------
946 // This must match StgAtomicallyFrame in Closures.h
947 #define ATOMICALLY_FRAME_FIELDS(w_,p_,info_ptr,p1,p2,code,next,result) \
949 PROF_HDR_FIELDS(w_,p1,p2) \
955 INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
956 // layout of the frame, and bind the field names
957 ATOMICALLY_FRAME_FIELDS(W_,P_,
962 return (P_ result) // value returned to the frame
965 gcptr trec, outer, next_invariant, q;
967 trec = StgTSO_trec(CurrentTSO);
968 outer = StgTRecHeader_enclosing_trec(trec);
970 if (outer == NO_TREC) {
971 /* First time back at the atomically frame -- pick up invariants */
972 ("ptr" next_invariant) =
973 ccall stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr");
974 frame_result = result;
977 /* Second/subsequent time back at the atomically frame -- abort the
978 * tx that's checking the invariant and move on to the next one */
979 StgTSO_trec(CurrentTSO) = outer;
980 StgInvariantCheckQueue_my_execution(next_invariant) = trec;
981 ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
982 /* Don't free trec -- it's linked from q and will be stashed in the
983 * invariant if we eventually commit. */
985 StgInvariantCheckQueue_next_queue_entry(next_invariant);
989 if (next_invariant != END_INVARIANT_CHECK_QUEUE) {
990 /* We can't commit yet: another invariant to check */
991 ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", trec "ptr");
992 StgTSO_trec(CurrentTSO) = trec;
993 q = StgInvariantCheckQueue_invariant(next_invariant);
995 (ATOMICALLY_FRAME_FIELDS(,,info_ptr,p1,p2,
996 code,next_invariant,frame_result))
997 (StgAtomicInvariant_code(q));
1001 /* We've got no more invariants to check, try to commit */
1002 (valid) = ccall stmCommitTransaction(MyCapability() "ptr", trec "ptr");
1004 /* Transaction was valid: commit succeeded */
1005 StgTSO_trec(CurrentTSO) = NO_TREC;
1006 return (frame_result);
1008 /* Transaction was not valid: try again */
1009 ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr",
1011 StgTSO_trec(CurrentTSO) = trec;
1012 next_invariant = END_INVARIANT_CHECK_QUEUE;
1015 // push the StgAtomicallyFrame again: the code generator is
1016 // clever enough to only assign the fields that have changed.
1017 (ATOMICALLY_FRAME_FIELDS(,,info_ptr,p1,p2,
1018 code,next_invariant,frame_result))
1025 INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
1026 // layout of the frame, and bind the field names
1027 ATOMICALLY_FRAME_FIELDS(W_,P_,
1032 return (/* no return values */)
1036 /* The TSO is currently waiting: should we stop waiting? */
1037 (valid) = ccall stmReWait(MyCapability() "ptr", CurrentTSO "ptr");
1039 /* Previous attempt is still valid: no point trying again yet */
1040 jump stg_block_noregs
1041 (ATOMICALLY_FRAME_FIELDS(,,info_ptr, p1, p2,
1042 code,next_invariant,frame_result))
1045 /* Previous attempt is no longer valid: try again */
1046 ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr");
1047 StgTSO_trec(CurrentTSO) = trec;
1049 // change the frame header to stg_atomically_frame_info
1051 (ATOMICALLY_FRAME_FIELDS(,,stg_atomically_frame_info, p1, p2,
1052 code,next_invariant,frame_result))
1057 // STM catch frame -------------------------------------------------------------
1059 /* Catch frames are very similar to update frames, but when entering
1060 * one we just pop the frame off the stack and perform the correct
1061 * kind of return to the activation record underneath us on the stack. */
1064 #define CATCH_STM_FRAME_FIELDS(w_,p_,info_ptr,p1,p2,code,handler) \
1066 PROF_HDR_FIELDS(w_,p1,p2) \
1070 INFO_TABLE_RET(stg_catch_stm_frame, CATCH_STM_FRAME,
1071 // layout of the frame, and bind the field names
1072 CATCH_STM_FRAME_FIELDS(W_,P_,info_ptr,p1,p2,code,handler))
1077 trec = StgTSO_trec(CurrentTSO);
1078 outer = StgTRecHeader_enclosing_trec(trec);
1079 (r) = ccall stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr");
1081 /* Commit succeeded */
1082 StgTSO_trec(CurrentTSO) = outer;
1087 ("ptr" new_trec) = ccall stmStartTransaction(MyCapability() "ptr", outer "ptr");
1088 StgTSO_trec(CurrentTSO) = new_trec;
1091 (CATCH_STM_FRAME_FIELDS(,,info_ptr,p1,p2,code,handler))
1097 // Primop definition -----------------------------------------------------------
1099 stg_atomicallyzh (P_ stm)
1103 P_ code, next_invariant, frame_result;
1105 // stmStartTransaction may allocate
1106 MAYBE_GC_P(stg_atomicallyzh, stm);
1110 old_trec = StgTSO_trec(CurrentTSO);
1112 /* Nested transactions are not allowed; raise an exception */
1113 if (old_trec != NO_TREC) {
1114 jump stg_raisezh(base_ControlziExceptionziBase_nestedAtomically_closure);
1118 next_invariant = END_INVARIANT_CHECK_QUEUE;
1119 frame_result = NO_TREC;
1121 /* Start the memory transaction */
1122 ("ptr" new_trec) = ccall stmStartTransaction(MyCapability() "ptr", old_trec "ptr");
1123 StgTSO_trec(CurrentTSO) = new_trec;
1126 (ATOMICALLY_FRAME_FIELDS(,,stg_atomically_frame_info, CCCS, 0,
1127 code,next_invariant,frame_result))
1131 // A closure representing "atomically x". This is used when a thread
1132 // inside a transaction receives an asynchronous exception; see #5866.
1133 // It is somewhat similar to the stg_raise closure.
1135 INFO_TABLE(stg_atomically,1,0,THUNK_1_0,"atomically","atomically")
1138 jump stg_atomicallyzh(StgThunk_payload(thunk,0));
1142 stg_catchSTMzh (P_ code /* :: STM a */,
1143 P_ handler /* :: Exception -> STM a */)
1147 /* Start a nested transaction to run the body of the try block in */
1150 cur_trec = StgTSO_trec(CurrentTSO);
1151 ("ptr" new_trec) = ccall stmStartTransaction(MyCapability() "ptr",
1153 StgTSO_trec(CurrentTSO) = new_trec;
1156 (CATCH_STM_FRAME_FIELDS(,,stg_catch_stm_frame_info, CCCS, 0,
1162 stg_catchRetryzh (P_ first_code, /* :: STM a */
1163 P_ alt_code /* :: STM a */)
1167 // stmStartTransaction may allocate
1168 MAYBE_GC_PP (stg_catchRetryzh, first_code, alt_code);
1172 /* Start a nested transaction within which to run the first code */
1173 ("ptr" new_trec) = ccall stmStartTransaction(MyCapability() "ptr",
1174 StgTSO_trec(CurrentTSO) "ptr");
1175 StgTSO_trec(CurrentTSO) = new_trec;
1177 // push the CATCH_RETRY stack frame, and apply first_code to realWorld#
1179 (CATCH_RETRY_FRAME_FIELDS(,, stg_catch_retry_frame_info, CCCS, 0,
1180 0, /* not running_alt_code */
1187 stg_retryzh /* no arg list: explicit stack layout */
1195 // STM operations may allocate
1196 MAYBE_GC_ (stg_retryzh); // NB. not MAYBE_GC(), we cannot make a
1197 // function call in an explicit-stack proc
1199 // Find the enclosing ATOMICALLY_FRAME or CATCH_RETRY_FRAME
1201 SAVE_THREAD_STATE();
1202 (frame_type) = ccall findRetryFrameHelper(MyCapability(), CurrentTSO "ptr");
1203 LOAD_THREAD_STATE();
1205 trec = StgTSO_trec(CurrentTSO);
1206 outer = StgTRecHeader_enclosing_trec(trec);
1208 if (frame_type == CATCH_RETRY_FRAME) {
1209 // The retry reaches a CATCH_RETRY_FRAME before the atomic frame
1210 ASSERT(outer != NO_TREC);
1211 // Abort the transaction attempting the current branch
1212 ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
1213 ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");
1214 if (StgCatchRetryFrame_running_alt_code(frame) == 0) {
1215 // Retry in the first branch: try the alternative
1216 ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", outer "ptr");
1217 StgTSO_trec(CurrentTSO) = trec;
1218 StgCatchRetryFrame_running_alt_code(frame) = 1 :: CInt; // true;
1219 R1 = StgCatchRetryFrame_alt_code(frame);
1220 jump stg_ap_v_fast [R1];
1222 // Retry in the alternative code: propagate the retry
1223 StgTSO_trec(CurrentTSO) = outer;
1224 Sp = Sp + SIZEOF_StgCatchRetryFrame;
1225 goto retry_pop_stack;
1229 // We've reached the ATOMICALLY_FRAME: attempt to wait
1230 ASSERT(frame_type == ATOMICALLY_FRAME);
1231 if (outer != NO_TREC) {
1232 // We called retry while checking invariants, so abort the current
1233 // invariant check (merging its TVar accesses into the parent's read
1234 // set so we'll wait on them)
1235 ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
1236 ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");
1238 StgTSO_trec(CurrentTSO) = trec;
1239 outer = StgTRecHeader_enclosing_trec(trec);
1241 ASSERT(outer == NO_TREC);
1243 (r) = ccall stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr");
1245 // Transaction was valid: stmWait put us on the TVars' queues, we now block
1246 StgHeader_info(frame) = stg_atomically_waiting_frame_info;
1248 R3 = trec; // passing to stmWaitUnblock()
1249 jump stg_block_stmwait [R3];
1251 // Transaction was not valid: retry immediately
1252 ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", outer "ptr");
1253 StgTSO_trec(CurrentTSO) = trec;
1255 R1 = StgAtomicallyFrame_code(frame);
1256 jump stg_ap_v_fast [R1];
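/* From Haskell, retry# and catchRetry# are the machinery behind STM's retry
 * and orElse.  Illustrative example (using the stm package):
 *
 *   import Control.Concurrent.STM
 *
 *   takeEither :: TVar (Maybe a) -> TVar (Maybe a) -> STM a
 *   takeEither va vb = takeIt va `orElse` takeIt vb
 *     where
 *       takeIt v = readTVar v >>=
 *                  maybe retry (\x -> writeTVar v Nothing >> return x)
 *
 * If both TVars hold Nothing the transaction blocks (the stmWait path above)
 * until one of them is written.
 */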
1260 stg_checkzh (P_ closure /* STM a */)
1264 MAYBE_GC_P (stg_checkzh, closure);
1266 trec = StgTSO_trec(CurrentTSO);
1267 ccall stmAddInvariantToCheck(MyCapability() "ptr",
1274 stg_newTVarzh (P_ init)
1278 ALLOC_PRIM_P (SIZEOF_StgTVar, stg_newTVarzh, init);
1280 tv = Hp - SIZEOF_StgTVar + WDS(1);
1281 SET_HDR (tv, stg_TVAR_DIRTY_info, CCCS);
1283 StgTVar_current_value(tv) = init;
1284 StgTVar_first_watch_queue_entry(tv) = stg_END_STM_WATCH_QUEUE_closure;
1285 StgTVar_num_updates(tv) = 0;
1291 stg_readTVarzh (P_ tvar)
1296 // Call to stmReadTVar may allocate
1297 MAYBE_GC_P (stg_readTVarzh, tvar);
1299 trec = StgTSO_trec(CurrentTSO);
1300 ("ptr" result) = ccall stmReadTVar(MyCapability() "ptr", trec "ptr",
1305 stg_readTVarIOzh ( P_ tvar /* :: TVar a */ )
1310 result = StgTVar_current_value(tvar);
1311 if (%INFO_PTR(result) == stg_TREC_HEADER_info) {
1317 stg_writeTVarzh (P_ tvar, /* :: TVar a */
1318 P_ new_value /* :: a */)
1322 // Call to stmWriteTVar may allocate
1323 MAYBE_GC_PP (stg_writeTVarzh, tvar, new_value);
1325 trec = StgTSO_trec(CurrentTSO);
1326 ccall stmWriteTVar(MyCapability() "ptr", trec "ptr", tvar "ptr",
1332 /* -----------------------------------------------------------------------------
1335 * take & putMVar work as follows. Firstly, an important invariant:
1337 * If the MVar is full, then the blocking queue contains only
1338 * threads blocked on putMVar, and if the MVar is empty then the
1339 * blocking queue contains only threads blocked on takeMVar.
1342 * MVar empty : then add ourselves to the blocking queue
1343 * MVar full : remove the value from the MVar, and
1344 * blocking queue empty : return
1345 * blocking queue non-empty : perform the first blocked putMVar
1346 * from the queue, and wake up the
1347 * thread (MVar is now full again)
1349 * putMVar is just the dual of the above algorithm.
1351 * How do we "perform a putMVar"? Well, we have to fiddle around with
1352 * the stack of the thread waiting to do the putMVar. See
1353 * stg_block_putmvar and stg_block_takemvar in HeapStackCheck.c for
1354 * the stack layout, and the PerformPut and PerformTake macros below.
1356 * It is important that a blocked take or put is woken up with the
1357 * take/put already performed, because otherwise there would be a
1358 * small window of vulnerability where the thread could receive an
1359 * exception and never perform its take or put, and we'd end up with a
1362 * -------------------------------------------------------------------------- */
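/* Seen from Haskell, the handoff described above looks like this (illustrative
 * only): the consumer blocks in takeMVar on an empty MVar, and the producer's
 * putMVar performs the take on the consumer's behalf before waking it.
 *
 *   import Control.Concurrent
 *
 *   main :: IO ()
 *   main = do
 *     box  <- newEmptyMVar
 *     done <- newEmptyMVar
 *     _ <- forkIO (takeMVar box >>= \v -> putMVar done (v * 2 :: Int))
 *     putMVar box 21
 *     takeMVar done >>= print   -- prints 42
 */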
1364 stg_isEmptyMVarzh ( P_ mvar /* :: MVar a */ )
1366 if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
1377 ALLOC_PRIM_ (SIZEOF_StgMVar, stg_newMVarzh);
1379 mvar = Hp - SIZEOF_StgMVar + WDS(1);
1380 SET_HDR(mvar,stg_MVAR_DIRTY_info,CCCS);
1381 // MVARs start dirty: generation 0 has no mutable list
1382 StgMVar_head(mvar) = stg_END_TSO_QUEUE_closure;
1383 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1384 StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
1389 #define PerformTake(stack, value) \
1391 sp = StgStack_sp(stack); \
1392 W_[sp + WDS(1)] = value; \
1393 W_[sp + WDS(0)] = stg_ret_p_info;
1395 #define PerformPut(stack,lval) \
1397 sp = StgStack_sp(stack) + WDS(3); \
1398 StgStack_sp(stack) = sp; \
1399 lval = W_[sp - WDS(1)];
1402 stg_takeMVarzh ( P_ mvar /* :: MVar a */ )
1404 W_ val, info, tso, q;
1406 LOCK_CLOSURE(mvar, info);
1408 /* If the MVar is empty, put ourselves on its blocking queue,
1409 * and wait until we're woken up. */
1411 if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
1412 if (info == stg_MVAR_CLEAN_info) {
1413 ccall dirty_MVAR(BaseReg "ptr", mvar "ptr");
1416 // We want to put the heap check down here in the slow path,
1417 // but be careful to unlock the closure before returning to
1418 // the RTS if the check fails.
1419 ALLOC_PRIM_WITH_CUSTOM_FAILURE
1420 (SIZEOF_StgMVarTSOQueue,
1421 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1422 GC_PRIM_P(stg_takeMVarzh, mvar));
1424 q = Hp - SIZEOF_StgMVarTSOQueue + WDS(1);
1426 SET_HDR(q, stg_MVAR_TSO_QUEUE_info, CCS_SYSTEM);
1427 StgMVarTSOQueue_link(q) = END_TSO_QUEUE;
1428 StgMVarTSOQueue_tso(q) = CurrentTSO;
1430 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1431 StgMVar_head(mvar) = q;
1433 StgMVarTSOQueue_link(StgMVar_tail(mvar)) = q;
1434 ccall recordClosureMutated(MyCapability() "ptr",
1435 StgMVar_tail(mvar));
1437 StgTSO__link(CurrentTSO) = q;
1438 StgTSO_block_info(CurrentTSO) = mvar;
1439 StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
1440 StgMVar_tail(mvar) = q;
1442 jump stg_block_takemvar(mvar);
1445 /* we got the value... */
1446 val = StgMVar_value(mvar);
1448 q = StgMVar_head(mvar);
1450 if (q == stg_END_TSO_QUEUE_closure) {
1451 /* No further putMVars, MVar is now empty */
1452 StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
1453 // If the MVar is not already dirty, then we don't need to make
1454 // it dirty, as it is empty with nothing blocking on it.
1455 unlockClosure(mvar, info);
1458 if (StgHeader_info(q) == stg_IND_info ||
1459 StgHeader_info(q) == stg_MSG_NULL_info) {
1460 q = StgInd_indirectee(q);
1464 // There are putMVar(s) waiting... wake up the first thread on the queue
1466 if (info == stg_MVAR_CLEAN_info) {
1467 ccall dirty_MVAR(BaseReg "ptr", mvar "ptr");
1470 tso = StgMVarTSOQueue_tso(q);
1471 StgMVar_head(mvar) = StgMVarTSOQueue_link(q);
1472 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1473 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1476 ASSERT(StgTSO_why_blocked(tso) == BlockedOnMVar::I16);
1477 ASSERT(StgTSO_block_info(tso) == mvar);
1479 // actually perform the putMVar for the thread that we just woke up
1481 stack = StgTSO_stackobj(tso);
1482 PerformPut(stack, StgMVar_value(mvar));
1484 // indicate that the MVar operation has now completed.
1485 StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;
1487 // no need to mark the TSO dirty, we have only written END_TSO_QUEUE.
1489 ccall tryWakeupThread(MyCapability() "ptr", tso);
1491 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1495 stg_tryTakeMVarzh ( P_ mvar /* :: MVar a */ )
1497 W_ val, info, tso, q;
1499 LOCK_CLOSURE(mvar, info);
1501 /* If the MVar is empty, return 0. */
1502 if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
1503 #if defined(THREADED_RTS)
1504 unlockClosure(mvar, info);
1506 /* HACK: we need a pointer to pass back,
1507 * so we abuse NO_FINALIZER_closure */
1509 return (0, stg_NO_FINALIZER_closure);
1512 /* we got the value... */
1513 val = StgMVar_value(mvar);
1515 q = StgMVar_head(mvar);
1517 if (q == stg_END_TSO_QUEUE_closure) {
1518 /* No further putMVars, MVar is now empty */
1519 StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
1520 unlockClosure(mvar, info);
1524 if (StgHeader_info(q) == stg_IND_info ||
1525 StgHeader_info(q) == stg_MSG_NULL_info) {
1526 q = StgInd_indirectee(q);
1530 // There are putMVar(s) waiting... wake up the first thread on the queue
1532 if (info == stg_MVAR_CLEAN_info) {
1533 ccall dirty_MVAR(BaseReg "ptr", mvar "ptr");
1536 tso = StgMVarTSOQueue_tso(q);
1537 StgMVar_head(mvar) = StgMVarTSOQueue_link(q);
1538 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1539 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1542 ASSERT(StgTSO_why_blocked(tso) == BlockedOnMVar::I16);
1543 ASSERT(StgTSO_block_info(tso) == mvar);
1545 // actually perform the putMVar for the thread that we just woke up
1547 stack = StgTSO_stackobj(tso);
1548 PerformPut(stack, StgMVar_value(mvar));
1550 // indicate that the MVar operation has now completed.
1551 StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;
1553 // no need to mark the TSO dirty, we have only written END_TSO_QUEUE.
1555 ccall tryWakeupThread(MyCapability() "ptr", tso);
1557 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1561 stg_putMVarzh ( P_ mvar, /* :: MVar a */
1562 P_ val, /* :: a */ )
1566 LOCK_CLOSURE(mvar, info);
1568 if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
1570 if (info == stg_MVAR_CLEAN_info) {
1571 ccall dirty_MVAR(BaseReg "ptr", mvar "ptr");
1574 // We want to put the heap check down here in the slow path,
1575 // but be careful to unlock the closure before returning to
1576 // the RTS if the check fails.
1577 ALLOC_PRIM_WITH_CUSTOM_FAILURE
1578 (SIZEOF_StgMVarTSOQueue,
1579 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1580 GC_PRIM_PP(stg_putMVarzh, mvar, val));
1582 q = Hp - SIZEOF_StgMVarTSOQueue + WDS(1);
1584 SET_HDR(q, stg_MVAR_TSO_QUEUE_info, CCS_SYSTEM);
1585 StgMVarTSOQueue_link(q) = END_TSO_QUEUE;
1586 StgMVarTSOQueue_tso(q) = CurrentTSO;
1588 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1589 StgMVar_head(mvar) = q;
1591 StgMVarTSOQueue_link(StgMVar_tail(mvar)) = q;
1592 ccall recordClosureMutated(MyCapability() "ptr",
1593 StgMVar_tail(mvar));
1595 StgTSO__link(CurrentTSO) = q;
1596 StgTSO_block_info(CurrentTSO) = mvar;
1597 StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
1598 StgMVar_tail(mvar) = q;
1600 jump stg_block_putmvar(mvar,val);
1603 q = StgMVar_head(mvar);
1605 if (q == stg_END_TSO_QUEUE_closure) {
1606 /* No further takes, the MVar is now full. */
1607 if (info == stg_MVAR_CLEAN_info) {
1608 ccall dirty_MVAR(BaseReg "ptr", mvar "ptr");
1610 StgMVar_value(mvar) = val;
1611 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1614 if (StgHeader_info(q) == stg_IND_info ||
1615 StgHeader_info(q) == stg_MSG_NULL_info) {
1616 q = StgInd_indirectee(q);
1620 // There are readMVar/takeMVar(s) waiting: wake up the first one
1622 tso = StgMVarTSOQueue_tso(q);
1623 StgMVar_head(mvar) = StgMVarTSOQueue_link(q);
1624 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1625 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1628 ASSERT(StgTSO_block_info(tso) == mvar);
1629 // save why_blocked here, because waking up the thread destroys this information.
1632 why_blocked = TO_W_(StgTSO_why_blocked(tso));
1634 // actually perform the takeMVar
1636 stack = StgTSO_stackobj(tso);
1637 PerformTake(stack, val);
1639 // indicate that the MVar operation has now completed.
1640 StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;
1642 if (TO_W_(StgStack_dirty(stack)) == 0) {
1643 ccall dirty_STACK(MyCapability() "ptr", stack "ptr");
1646 ccall tryWakeupThread(MyCapability() "ptr", tso);
1648 // If it was a readMVar, then we can still do work,
1649 // so loop back. (XXX: This could take a while)
1650 if (why_blocked == BlockedOnMVarRead) {
1651 q = StgMVarTSOQueue_link(q);
1655 ASSERT(why_blocked == BlockedOnMVar);
1657 unlockClosure(mvar, info);
1662 stg_tryPutMVarzh ( P_ mvar, /* :: MVar a */
1663 P_ val, /* :: a */ )
1667 LOCK_CLOSURE(mvar, info);
1669 if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
1670 #if defined(THREADED_RTS)
1671 unlockClosure(mvar, info);
1676 q = StgMVar_head(mvar);
1678 if (q == stg_END_TSO_QUEUE_closure) {
1679 /* No further takes, the MVar is now full. */
1680 if (info == stg_MVAR_CLEAN_info) {
1681 ccall dirty_MVAR(BaseReg "ptr", mvar "ptr");
1684 StgMVar_value(mvar) = val;
1685 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1688 if (StgHeader_info(q) == stg_IND_info ||
1689 StgHeader_info(q) == stg_MSG_NULL_info) {
1690 q = StgInd_indirectee(q);
1694 // There are takeMVar(s) waiting: wake up the first one
1696 tso = StgMVarTSOQueue_tso(q);
1697 StgMVar_head(mvar) = StgMVarTSOQueue_link(q);
1698 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1699 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1702 ASSERT(StgTSO_block_info(tso) == mvar);
1703 // save why_blocked here, because waking up the thread destroys this information.
1706 why_blocked = TO_W_(StgTSO_why_blocked(tso));
1708 // actually perform the takeMVar
1710 stack = StgTSO_stackobj(tso);
1711 PerformTake(stack, val);
1713 // indicate that the MVar operation has now completed.
1714 StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;
1716 if (TO_W_(StgStack_dirty(stack)) == 0) {
1717 ccall dirty_STACK(MyCapability() "ptr", stack "ptr");
1720 ccall tryWakeupThread(MyCapability() "ptr", tso);
1722 // If it was a readMVar, then we can still do work,
1723 // so loop back. (XXX: This could take a while)
1724 if (why_blocked == BlockedOnMVarRead) {
1725 q = StgMVarTSOQueue_link(q);
1729 ASSERT(why_blocked == BlockedOnMVar);
1731 unlockClosure(mvar, info);
1735 stg_readMVarzh ( P_ mvar, /* :: MVar a */ )
1737 W_ val, info, tso, q;
1739 LOCK_CLOSURE(mvar, info);
1741 /* If the MVar is empty, put ourselves on the blocked readers
1742 * list and wait until we're woken up. */
1744 if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
1746 if (info == stg_MVAR_CLEAN_info) {
1747 ccall dirty_MVAR(BaseReg "ptr", mvar "ptr");
1750 ALLOC_PRIM_WITH_CUSTOM_FAILURE
1751 (SIZEOF_StgMVarTSOQueue,
1752 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1753 GC_PRIM_P(stg_readMVarzh, mvar));
1755 q = Hp - SIZEOF_StgMVarTSOQueue + WDS(1);
1757 // readMVars are pushed to the front of the queue, so
1758 // they get handled immediately
1759 SET_HDR(q, stg_MVAR_TSO_QUEUE_info, CCS_SYSTEM);
1760 StgMVarTSOQueue_link(q) = StgMVar_head(mvar);
1761 StgMVarTSOQueue_tso(q) = CurrentTSO;
1763 StgTSO__link(CurrentTSO) = q;
1764 StgTSO_block_info(CurrentTSO) = mvar;
1765 StgTSO_why_blocked(CurrentTSO) = BlockedOnMVarRead::I16;
1766 StgMVar_head(mvar) = q;
1768 if (StgMVar_tail(mvar) == stg_END_TSO_QUEUE_closure) {
1769 StgMVar_tail(mvar) = q;
1772 jump stg_block_readmvar(mvar);
1775 val = StgMVar_value(mvar);
1777 unlockClosure(mvar, info);
1781 stg_tryReadMVarzh ( P_ mvar, /* :: MVar a */ )
1783 W_ val, info, tso, q;
1785 LOCK_CLOSURE(mvar, info);
1787 if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
1788 return (0, stg_NO_FINALIZER_closure);
1791 val = StgMVar_value(mvar);
1793 unlockClosure(mvar, info);
1797 /* -----------------------------------------------------------------------------
1798 Stable pointer primitives
1799 ------------------------------------------------------------------------- */
1801 stg_makeStableNamezh ( P_ obj )
1805 (index) = ccall lookupStableName(obj "ptr");
1807 /* Is there already a StableName for this heap object?
1808 * stable_name_table is a pointer to an array of snEntry structs.
1810 if ( snEntry_sn_obj(W_[stable_name_table] + index*SIZEOF_snEntry) == NULL ) {
1811 ALLOC_PRIM (SIZEOF_StgStableName);
1812 sn_obj = Hp - SIZEOF_StgStableName + WDS(1);
1813 SET_HDR(sn_obj, stg_STABLE_NAME_info, CCCS);
1814 StgStableName_sn(sn_obj) = index;
1815 snEntry_sn_obj(W_[stable_name_table] + index*SIZEOF_snEntry) = sn_obj;
1817 sn_obj = snEntry_sn_obj(W_[stable_name_table] + index*SIZEOF_snEntry);
1823 stg_makeStablePtrzh ( P_ obj )
1827 ("ptr" sp) = ccall getStablePtr(obj "ptr");
1831 stg_deRefStablePtrzh ( P_ sp )
1834 r = spEntry_addr(W_[stable_ptr_table] + sp*SIZEOF_spEntry);
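/* Illustrative Haskell usage of the stable-name table manipulated above: two
 * makeStableName calls on the same heap object return equal names.
 *
 *   import System.Mem.StableName
 *
 *   main :: IO ()
 *   main = do
 *     let xs = [1 .. 10] :: [Int]
 *     n1 <- makeStableName xs
 *     n2 <- makeStableName xs
 *     print (n1 == n2)   -- True: both calls hit the same snEntry
 */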
1838 /* -----------------------------------------------------------------------------
1839 Bytecode object primitives
1840 ------------------------------------------------------------------------- */
1842 stg_newBCOzh ( P_ instrs,
1848 W_ bco, bytes, words;
1850 words = BYTES_TO_WDS(SIZEOF_StgBCO) + BYTE_ARR_WDS(bitmap_arr);
1855 bco = Hp - bytes + WDS(1);
1856 SET_HDR(bco, stg_BCO_info, CCCS);
1858 StgBCO_instrs(bco) = instrs;
1859 StgBCO_literals(bco) = literals;
1860 StgBCO_ptrs(bco) = ptrs;
1861 StgBCO_arity(bco) = HALF_W_(arity);
1862 StgBCO_size(bco) = HALF_W_(words);
1864 // Copy the arity/bitmap info into the BCO
1868 if (i < BYTE_ARR_WDS(bitmap_arr)) {
1869 StgBCO_bitmap(bco,i) = StgArrWords_payload(bitmap_arr,i);
1877 stg_mkApUpd0zh ( P_ bco )
1881 // This function is *only* used to wrap zero-arity BCOs in an
1882 // updatable wrapper (see ByteCodeLink.lhs). An AP thunk is always
1883 // saturated and always points directly to a FUN or BCO.
1884 ASSERT(%INFO_TYPE(%GET_STD_INFO(bco)) == HALF_W_(BCO) &&
1885 StgBCO_arity(bco) == HALF_W_(0));
1887 HP_CHK_P(SIZEOF_StgAP, stg_mkApUpd0zh, bco);
1888 TICK_ALLOC_UP_THK(0, 0);
1889 CCCS_ALLOC(SIZEOF_StgAP);
1891 ap = Hp - SIZEOF_StgAP + WDS(1);
1892 SET_HDR(ap, stg_AP_info, CCCS);
1894 StgAP_n_args(ap) = HALF_W_(0);
1895 StgAP_fun(ap) = bco;
1900 stg_unpackClosurezh ( P_ closure )
1902 // TODO: Consider the absence of ptrs or nonptrs as a special case ?
1904 W_ info, ptrs, nptrs, p, ptrs_arr, nptrs_arr;
1905 info = %GET_STD_INFO(UNTAG(closure));
1907 // Some closures have non-standard layout, so we omit those here.
1909 type = TO_W_(%INFO_TYPE(info));
1910 switch [0 .. N_CLOSURE_TYPES] type {
1911 case THUNK_SELECTOR : {
1916 case THUNK, THUNK_1_0, THUNK_0_1, THUNK_2_0, THUNK_1_1,
1917 THUNK_0_2, THUNK_STATIC, AP, PAP, AP_STACK, BCO : {
1923 ptrs = TO_W_(%INFO_PTRS(info));
1924 nptrs = TO_W_(%INFO_NPTRS(info));
1929 W_ ptrs_arr_sz, ptrs_arr_cards, nptrs_arr_sz;
1930 nptrs_arr_sz = SIZEOF_StgArrWords + WDS(nptrs);
1931 ptrs_arr_cards = mutArrPtrsCardWords(ptrs);
1932 ptrs_arr_sz = SIZEOF_StgMutArrPtrs + WDS(ptrs) + WDS(ptrs_arr_cards);
1934 ALLOC_PRIM_P (ptrs_arr_sz + nptrs_arr_sz, stg_unpackClosurezh, closure);
1937 clos = UNTAG(closure);
1939 ptrs_arr = Hp - nptrs_arr_sz - ptrs_arr_sz + WDS(1);
1940 nptrs_arr = Hp - nptrs_arr_sz + WDS(1);
1942 SET_HDR(ptrs_arr, stg_MUT_ARR_PTRS_FROZEN_info, CCCS);
1943 StgMutArrPtrs_ptrs(ptrs_arr) = ptrs;
1944 StgMutArrPtrs_size(ptrs_arr) = ptrs + ptrs_arr_cards;
1949 W_[ptrs_arr + SIZEOF_StgMutArrPtrs + WDS(p)] = StgClosure_payload(clos,p);
1953 /* We can leave the card table uninitialised, since the array is
1954 allocated in the nursery. The GC will fill it in if/when the array is promoted. */
1957 SET_HDR(nptrs_arr, stg_ARR_WORDS_info, CCCS);
1958 StgArrWords_bytes(nptrs_arr) = WDS(nptrs);
1962 W_[BYTE_ARR_CTS(nptrs_arr) + WDS(p)] = StgClosure_payload(clos, p+ptrs);
1966 return (info, ptrs_arr, nptrs_arr);
1969 /* -----------------------------------------------------------------------------
1970 Thread I/O blocking primitives
1971 -------------------------------------------------------------------------- */
1973 /* Add a thread to the end of the blocked queue. (C-- version of the C
1974 * macro in Schedule.h). */
1976 #define APPEND_TO_BLOCKED_QUEUE(tso) \
1977 ASSERT(StgTSO__link(tso) == END_TSO_QUEUE); \
1978 if (W_[blocked_queue_hd] == END_TSO_QUEUE) { \
1979 W_[blocked_queue_hd] = tso; \
1981 ccall setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl] "ptr", tso); \
1983 W_[blocked_queue_tl] = tso;
1985 stg_waitReadzh ( W_ fd )
1988 ccall barf("waitRead# on threaded RTS") never returns;
1991 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
1992 StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
1993 StgTSO_block_info(CurrentTSO) = fd;
1994 // No locking - we're not going to use this interface in the
1995 // threaded RTS anyway.
1996 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
1997 jump stg_block_noregs();
2001 stg_waitWritezh ( W_ fd )
2004 ccall barf("waitWrite# on threaded RTS") never returns;
2007 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2008 StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
2009 StgTSO_block_info(CurrentTSO) = fd;
2010 // No locking - we're not going to use this interface in the
2011 // threaded RTS anyway.
2012 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2013 jump stg_block_noregs();
2018 STRING(stg_delayzh_malloc_str, "stg_delayzh")
2019 stg_delayzh ( W_ us_delay )
2021 #ifdef mingw32_HOST_OS
2029 ccall barf("delay# on threaded RTS") never returns;
2032 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2033 StgTSO_why_blocked(CurrentTSO) = BlockedOnDelay::I16;
2035 #ifdef mingw32_HOST_OS
2037 /* could probably allocate this on the heap instead */
2038 ("ptr" ares) = ccall stgMallocBytes(SIZEOF_StgAsyncIOResult,
2039 stg_delayzh_malloc_str);
2040 (reqID) = ccall addDelayRequest(us_delay);
2041 StgAsyncIOResult_reqID(ares) = reqID;
2042 StgAsyncIOResult_len(ares) = 0;
2043 StgAsyncIOResult_errCode(ares) = 0;
2044 StgTSO_block_info(CurrentTSO) = ares;
2046 /* Having all async-blocked threads reside on the blocked_queue
2047 * simplifies matters, so change the status to OnDoProc and put the
2048 * delayed thread on the blocked_queue. */
2050 StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
2051 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2052 jump stg_block_async_void();
2057 (target) = ccall getDelayTarget(us_delay);
2059 StgTSO_block_info(CurrentTSO) = target;
2061 /* Insert the new thread in the sleeping queue. */
2063 t = W_[sleeping_queue];
2065 if (t != END_TSO_QUEUE && StgTSO_block_info(t) < target) {
2067 t = StgTSO__link(t);
2071 StgTSO__link(CurrentTSO) = t;
2073 W_[sleeping_queue] = CurrentTSO;
2075 ccall setTSOLink(MyCapability() "ptr", prev "ptr", CurrentTSO);
2077 jump stg_block_noregs();
2079 #endif /* !THREADED_RTS */
2083 #ifdef mingw32_HOST_OS
2084 STRING(stg_asyncReadzh_malloc_str, "stg_asyncReadzh")
2085 stg_asyncReadzh ( W_ fd, W_ is_sock, W_ len, W_ buf )
2091 ccall barf("asyncRead# on threaded RTS") never returns;
2094 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2095 StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
2097 /* could probably allocate this on the heap instead */
2098 ("ptr" ares) = ccall stgMallocBytes(SIZEOF_StgAsyncIOResult,
2099 stg_asyncReadzh_malloc_str);
2100 (reqID) = ccall addIORequest(fd, 0/*FALSE*/,is_sock,len,buf "ptr");
2101 StgAsyncIOResult_reqID(ares) = reqID;
2102 StgAsyncIOResult_len(ares) = 0;
2103 StgAsyncIOResult_errCode(ares) = 0;
2104 StgTSO_block_info(CurrentTSO) = ares;
2105 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2106 jump stg_block_async();
2110 STRING(stg_asyncWritezh_malloc_str, "stg_asyncWritezh")
2111 stg_asyncWritezh ( W_ fd, W_ is_sock, W_ len, W_ buf )
2117 ccall barf("asyncWrite# on threaded RTS") never returns;
2120 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2121 StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
2123 ("ptr" ares) = ccall stgMallocBytes(SIZEOF_StgAsyncIOResult,
2124 stg_asyncWritezh_malloc_str);
2125 (reqID) = ccall addIORequest(fd, 1/*TRUE*/,is_sock,len,buf "ptr");
2127 StgAsyncIOResult_reqID(ares) = reqID;
2128 StgAsyncIOResult_len(ares) = 0;
2129 StgAsyncIOResult_errCode(ares) = 0;
2130 StgTSO_block_info(CurrentTSO) = ares;
2131 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2132 jump stg_block_async();
2136 STRING(stg_asyncDoProczh_malloc_str, "stg_asyncDoProczh")
2137 stg_asyncDoProczh ( W_ proc, W_ param )
2143 ccall barf("asyncDoProc# on threaded RTS") never returns;
2146 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2147 StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
2149 /* could probably allocate this on the heap instead */
2150 ("ptr" ares) = ccall stgMallocBytes(SIZEOF_StgAsyncIOResult,
2151 stg_asyncDoProczh_malloc_str);
2152 (reqID) = ccall addDoProcRequest(proc "ptr",param "ptr");
2153 StgAsyncIOResult_reqID(ares) = reqID;
2154 StgAsyncIOResult_len(ares) = 0;
2155 StgAsyncIOResult_errCode(ares) = 0;
2156 StgTSO_block_info(CurrentTSO) = ares;
2157 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2158 jump stg_block_async();
2163 /* -----------------------------------------------------------------------------
2166 * noDuplicate# tries to ensure that none of the thunks under
2167 * evaluation by the current thread are also under evaluation by
2168 * another thread. It relies on *both* threads doing noDuplicate#;
2169 * the second one will get blocked if they are duplicating some work.
2171 * The idea is that noDuplicate# is used within unsafePerformIO to
2172 * ensure that the IO operation is performed at most once.
2173 * noDuplicate# calls threadPaused which acquires an exclusive lock on
2174 * all the thunks currently under evaluation by the current thread.
2176 * Consider the following scenario. There is a thunk A, whose
2177 * evaluation requires evaluating thunk B, where thunk B is an
2178 * unsafePerformIO. Two threads, 1 and 2, both enter A. Thread 2
2179 * is pre-empted before it enters B, and claims A by blackholing it
2180 * (in threadPaused). Thread 1 now enters B, and calls noDuplicate#.
2183 * +-----------+ +---------------+
2184 * | -------+-----> A <-------+------- |
2185 * | update | BLACKHOLE | marked_update |
2186 * +-----------+ +---------------+
2189 * | | +---------------+
2192 * | update | BLACKHOLE
2195 * At this point: A is a blackhole, owned by thread 2. noDuplicate#
2196 * calls threadPaused, which walks up the stack and
2197 * - claims B on behalf of thread 1
2198 * - then it reaches the update frame for A, which it sees is already
2199 * a BLACKHOLE and is therefore owned by another thread. Since
2200 * thread 1 is duplicating work, the computation up to the update
2201 * frame for A is suspended, including thunk B.
2202 * - thunk B, which is an unsafePerformIO, has now been reverted to
2203 * an AP_STACK which could be duplicated - BAD!
2204 * - The solution is as follows: before calling threadPaused, we
2205 * leave a frame on the stack (stg_noDuplicate_info) that will call
2206 * noDuplicate# again if the current computation is suspended and restarted.
2209 * See the test program in concurrent/prog003 for a way to demonstrate
2210 * this. It needs to be run with +RTS -N3 or greater, and the bug
2211 * only manifests occasionally (once every 10 runs or so).
2212 * -------------------------------------------------------------------------- */
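/* In base, this is the difference between unsafeDupablePerformIO and
 * unsafePerformIO: the latter runs noDuplicate# first.  Roughly (an
 * illustrative paraphrase of GHC.IO, not the exact source):
 *
 *   unsafePerformIO :: IO a -> a
 *   unsafePerformIO m = unsafeDupablePerformIO (noDuplicate >> m)
 *
 *   noDuplicate :: IO ()
 *   noDuplicate = IO $ \s -> case noDuplicate# s of s' -> (# s', () #)
 */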
2214 INFO_TABLE_RET(stg_noDuplicate, RET_SMALL, W_ info_ptr)
2215 return (/* no return values */)
2217 jump stg_noDuplicatezh();
2220 stg_noDuplicatezh /* no arg list: explicit stack layout */
2222 // With a single capability there's no chance of work duplication.
2223 if (CInt[n_capabilities] == 1 :: CInt) {
2224 jump %ENTRY_CODE(Sp(0)) [];
2227 STK_CHK_LL (WDS(1), stg_noDuplicatezh);
2229 // leave noDuplicate frame in case the current
2230 // computation is suspended and restarted (see above).
2232 Sp(0) = stg_noDuplicate_info;
2234 SAVE_THREAD_STATE();
2235 ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
2236 ccall threadPaused (MyCapability() "ptr", CurrentTSO "ptr");
2238 if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
2239 jump stg_threadFinished [];
2241 LOAD_THREAD_STATE();
2242 ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
2243 // remove the stg_noDuplicate frame if it is still there.
2244 if (Sp(0) == stg_noDuplicate_info) {
2247 jump %ENTRY_CODE(Sp(0)) [];
2251 /* -----------------------------------------------------------------------------
2253 -------------------------------------------------------------------------- */
2255 stg_getApStackValzh ( P_ ap_stack, W_ offset )
2257 if (%INFO_PTR(ap_stack) == stg_AP_STACK_info) {
2258 return (1,StgAP_STACK_payload(ap_stack,offset));
2260 return (0,ap_stack);
2264 // Write the cost center stack of the first argument on stderr; return
2265 // the second. Possibly only makes sense for already evaluated
2267 stg_traceCcszh ( P_ obj, P_ ret )
2272 ccs = StgHeader_ccs(UNTAG(obj));
2273 ccall fprintCCS_stderr(ccs "ptr");
2276 jump stg_ap_0_fast(ret);
2283 #ifndef THREADED_RTS
2284 return (0,ghczmprim_GHCziTypes_False_closure);
2286 (spark) = ccall findSpark(MyCapability());
2290 return (0,ghczmprim_GHCziTypes_False_closure);
2299 (n) = ccall dequeElements(Capability_sparks(MyCapability()));
2306 stg_traceEventzh ( W_ msg )
2308 #if defined(TRACING) || defined(DEBUG)
2310 ccall traceUserMsg(MyCapability() "ptr", msg "ptr");
2312 #elif defined(DTRACE)
2316 // We should go through the macro HASKELLEVENT_USER_MSG_ENABLED from
2317 // RtsProbes.h, but that header file includes unistd.h, which doesn't
2319 #if !defined(solaris2_TARGET_OS)
2320 (enabled) = ccall __dtrace_isenabled$HaskellEvent$user__msg$v1();
2322 // Solaris' DTrace can't handle the
2323 // __dtrace_isenabled$HaskellEvent$user__msg$v1
2324 // call above. This call is just for testing whether the user__msg
2325 // probe is enabled, and is here just as a performance optimization.
2326 // Since preparation for the probe is not that complex, I disable this
2327 // test above for Solaris and enable the probe usage manually
2328 // here. Please note that this does not mean that the probe will be
2329 // used at runtime! You still need to enable it by consuming it
2330 // in your dtrace script as you do with any other probe.
2334 ccall dtraceUserMsgWrapper(MyCapability() "ptr", msg "ptr");
2341 // Same code as stg_traceEventzh above but a different kind of event
2342 // Before changing this code, read the comments in the impl above
2343 stg_traceMarkerzh ( W_ msg )
2345 #if defined(TRACING) || defined(DEBUG)
2347 ccall traceUserMarker(MyCapability() "ptr", msg "ptr");
2349 #elif defined(DTRACE)
2353 #if !defined(solaris2_TARGET_OS)
2354 (enabled) = ccall __dtrace_isenabled$HaskellEvent$user__marker$v1();
2359 ccall dtraceUserMarkerWrapper(MyCapability() "ptr", msg "ptr");