entryHeapCheck: fix calls to stg_gc_fun and stg_gc_enter_1
[ghc.git] / compiler / codeGen / StgCmmHeap.hs
1 -----------------------------------------------------------------------------
2 --
3 -- Stg to C--: heap management functions
4 --
5 -- (c) The University of Glasgow 2004-2006
6 --
7 -----------------------------------------------------------------------------
8
9 module StgCmmHeap (
10 getVirtHp, setVirtHp, setRealHp,
11 getHpRelOffset, hpRel,
12
13 entryHeapCheck, altHeapCheck, altHeapCheckReturnsTo,
14
15 mkVirtHeapOffsets, mkVirtConstrOffsets,
16 mkStaticClosureFields, mkStaticClosure,
17
18 allocDynClosure, allocDynClosureCmm,
19 emitSetDynHdr
20 ) where
21
22 #include "HsVersions.h"
23
24 import StgSyn
25 import CLabel
26 import StgCmmLayout
27 import StgCmmUtils
28 import StgCmmMonad
29 import StgCmmProf
30 import StgCmmTicky
31 import StgCmmGran
32 import StgCmmClosure
33 import StgCmmEnv
34
35 import MkGraph
36
37 import Hoopl
38 import SMRep
39 import Cmm
40 import CmmUtils
41 import CostCentre
42 import Outputable
43 import IdInfo( CafInfo(..), mayHaveCafRefs )
44 import Module
45 import DynFlags
46 import FastString( mkFastString, fsLit )
47 import Constants
48 import Util
49
50 import Control.Monad (when)
51
52 -----------------------------------------------------------
53 -- Initialise dynamic heap objects
54 -----------------------------------------------------------
55
-- | Allocate a dynamic (heap-resident) closure and return a 'CmmExpr'
-- pointing at its info-pointer word.  The args are 'StgArg's; the
-- Cmm-level worker is 'allocDynClosureCmm' below.
allocDynClosure
        :: CmmInfoTable
        -> LambdaFormInfo
        -> CmmExpr              -- Cost Centre to stick in the object
        -> CmmExpr              -- Cost Centre to blame for this alloc
                                -- (usually the same; sometimes "OVERHEAD")

        -> [(NonVoid StgArg, VirtualHpOffset)]  -- Offsets from start of object
                                                -- ie Info ptr has offset zero.
                                                -- No void args in here
        -> FCode CmmExpr -- returns Hp+n

allocDynClosureCmm
        :: CmmInfoTable -> LambdaFormInfo -> CmmExpr -> CmmExpr
        -> [(CmmExpr, VirtualHpOffset)]
        -> FCode CmmExpr -- returns Hp+n

-- allocDynClosure allocates the thing in the heap,
-- and modifies the virtual Hp to account for this.
-- The second return value is the graph that sets the value of the
-- returned LocalReg, which should point to the closure after executing
-- the graph.

-- allocDynClosure returns an (Hp+8) CmmExpr, and hence the result is
-- only valid until Hp is changed.  The caller should assign the
-- result to a LocalReg if it is required to remain live.
--
-- The reason we don't assign it to a LocalReg here is that the caller
-- is often about to call regIdInfo, which immediately assigns the
-- result of allocDynClosure to a new temp in order to add the tag.
-- So by not generating a LocalReg here we avoid a common source of
-- new temporaries and save some compile time.  This can be quite
-- significant - see test T4801.

allocDynClosure info_tbl lf_info use_cc _blame_cc args_w_offsets
  = do  { let (args, offsets) = unzip args_w_offsets
        ; cmm_args <- mapM getArgAmode args     -- No void args
          -- Delegate to the Cmm-level allocator once the StgArgs have
          -- been turned into Cmm amodes.
        ; allocDynClosureCmm info_tbl lf_info
                             use_cc _blame_cc (zip cmm_args offsets)
        }

allocDynClosureCmm info_tbl lf_info use_cc _blame_cc amodes_w_offsets
  = do  { virt_hp <- getVirtHp

        -- SAY WHAT WE ARE ABOUT TO DO
        -- (ticky and cost-centre profiling both record the allocation)
        ; let rep = cit_rep info_tbl
        ; tickyDynAlloc rep lf_info
        ; profDynAlloc rep use_cc

        -- FIND THE OFFSET OF THE INFO-PTR WORD
        ; let   info_offset = virt_hp + 1
                -- info_offset is the VirtualHpOffset of the first
                -- word of the new object
                -- Remember, virtHp points to last allocated word,
                -- ie 1 *before* the info-ptr word of new object.

                info_ptr = CmmLit (CmmLabel (cit_lbl info_tbl))

        -- ALLOCATE THE OBJECT
        -- Header first (info ptr + profiling header), then the payload
        -- at the given word offsets.
        ; base <- getHpRelOffset info_offset
        ; emitComment $ mkFastString "allocDynClosure"
        ; emitSetDynHdr base info_ptr use_cc
        ; let (cmm_args, offsets) = unzip amodes_w_offsets
        ; hpStore base cmm_args offsets

        -- BUMP THE VIRTUAL HEAP POINTER
        ; dflags <- getDynFlags
        ; setVirtHp (virt_hp + heapClosureSize dflags rep)

        -- Result is Hp-relative, valid only until Hp changes (see above).
        ; getHpRelOffset info_offset
        }
128
-- | Write the fixed header of a dynamic closure at @base@: the info
-- pointer in word 0, followed by the dynamic profiling header words
-- (empty unless profiling is on).
emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> FCode ()
emitSetDynHdr base info_ptr ccs = do
    dflags <- getDynFlags
    hpStore base (info_ptr : dynProfHdr dflags ccs) [0..]
    -- ToDo: Gransim stuff
    -- ToDo: Parallel stuff
    -- No ticky header
139
-- | Store each value at its paired word offset from @base@,
-- i.e. base[off] := val for every (val, off) pair.
hpStore :: CmmExpr -> [CmmExpr] -> [VirtualHpOffset] -> FCode ()
hpStore base vals offs
  = emit $ catAGraphs
      [ mkStore (cmmOffsetW base off) val | (val, off) <- zip vals offs ]
146
147
148 -----------------------------------------------------------
149 -- Layout of static closures
150 -----------------------------------------------------------
151
152 -- Make a static closure, adding on any extra padding needed for CAFs,
153 -- and adding a static link field if necessary.
154
-- | Lay out a static closure: the payload, plus any CAF padding and
-- the static-link / saved-info fields required by the GC.
mkStaticClosureFields
        :: DynFlags
        -> CmmInfoTable
        -> CostCentreStack
        -> CafInfo
        -> [CmmLit]             -- Payload
        -> [CmmLit]             -- The full closure
mkStaticClosureFields dflags info_tbl ccs caf_refs payload
  = mkStaticClosure dflags (cit_lbl info_tbl) ccs payload
                    caf_padding link_field saved_info
  where
    -- CAFs must have consistent layout, regardless of whether they
    -- are actually updatable or not.  The layout of a CAF is:
    --
    --        3 saved_info
    --        2 static_link
    --        1 indirectee
    --        0 info ptr
    --
    -- the static_link and saved_info fields must always be in the
    -- same place.  So we use isThunkRep rather than closureUpdReqd
    -- here:
    closure_is_caf = isThunkRep (cit_rep info_tbl)

    caf_padding
      | closure_is_caf = ASSERT(null payload) [mkIntCLit 0]
      | otherwise      = []

    link_field
      | closure_is_caf || staticClosureNeedsLink (mayHaveCafRefs caf_refs) info_tbl
      = [link_value]
      | otherwise
      = []

    saved_info
      | closure_is_caf = [mkIntCLit 0]
      | otherwise      = []

    -- For a static constructor which has NoCafRefs, we set the
    -- static link field to a non-zero value so the garbage
    -- collector will ignore it.
    link_value
      | mayHaveCafRefs caf_refs = mkIntCLit 0
      | otherwise               = mkIntCLit 1  -- No CAF refs
202
203
-- | Assemble the literal words of a static closure, in layout order:
-- info pointer, variable header (GranSim/par/profiling/ticky), payload
-- (each literal padded to a full word), then padding and link fields.
mkStaticClosure :: DynFlags -> CLabel -> CostCentreStack -> [CmmLit]
  -> [CmmLit] -> [CmmLit] -> [CmmLit] -> [CmmLit]
mkStaticClosure dflags info_lbl ccs payload padding static_link_field saved_info_field
  = concat
      [ [CmmLabel info_lbl]
      , hdr_extras
      , concatMap padLitToWord payload
      , padding
      , static_link_field
      , saved_info_field
      ]
  where
    -- Optional header words contributed by the various RTS ways.
    hdr_extras = staticGranHdr
              ++ staticParHdr
              ++ staticProfHdr dflags ccs
              ++ staticTickyHdr
219
-- JD: Simon had elided this padding, but without it the C back end
-- asserts failure.  Maybe it's a bad assertion, and this padding is
-- indeed unnecessary?
padLitToWord :: CmmLit -> [CmmLit]
padLitToWord lit = lit : pad (wORD_SIZE - widthInBytes (typeWidth (cmmLitType lit)))
  where
    -- Fill the remaining @n@ bytes with zero literals, emitting the
    -- widest literal whose size keeps the running offset aligned.
    pad :: Int -> [CmmLit]
    pad n
      | n <= 0         = []
      | n `rem` 2 /= 0 = CmmInt 0 W8  : pad (n-1)
      | n `rem` 4 /= 0 = CmmInt 0 W16 : pad (n-2)
      | n `rem` 8 /= 0 = CmmInt 0 W32 : pad (n-4)
      | otherwise      = CmmInt 0 W64 : pad (n-8)
232
233 -----------------------------------------------------------
234 -- Heap overflow checking
235 -----------------------------------------------------------
236
237 {- Note [Heap checks]
238 ~~~~~~~~~~~~~~~~~~
239 Heap checks come in various forms. We provide the following entry
240 points to the runtime system, all of which use the native C-- entry
241 convention.
242
243 * gc() performs garbage collection and returns
244 nothing to its caller
245
246 * A series of canned entry points like
247 r = gc_1p( r )
248 where r is a pointer. This performs gc, and
249 then returns its argument r to its caller.
250
251 * A series of canned entry points like
252 gcfun_2p( f, x, y )
253 where f is a function closure of arity 2
254 This performs garbage collection, keeping alive the
255 three argument ptrs, and then tail-calls f(x,y)
256
257 These are used in the following circumstances
258
259 * entryHeapCheck: Function entry
260 (a) With a canned GC entry sequence
261 f( f_clo, x:ptr, y:ptr ) {
262 Hp = Hp+8
263 if Hp > HpLim goto L
264 ...
265 L: HpAlloc = 8
266 jump gcfun_2p( f_clo, x, y ) }
267 Note the tail call to the garbage collector;
268 it should do no register shuffling
269
270 (b) No canned sequence
271 f( f_clo, x:ptr, y:ptr, ...etc... ) {
272 T: Hp = Hp+8
273 if Hp > HpLim goto L
274 ...
275 L: HpAlloc = 8
276 call gc() -- Needs an info table
277 goto T }
278
279 * altHeapCheck: Immediately following an eval
280 Started as
281 case f x y of r { (p,q) -> rhs }
282 (a) With a canned sequence for the results of f
283 (which is the very common case since
284 all boxed cases return just one pointer
285 ...
286 r = f( x, y )
287 K: -- K needs an info table
288 Hp = Hp+8
289 if Hp > HpLim goto L
290 ...code for rhs...
291
292 L: r = gc_1p( r )
293 goto K }
294
295 Here, the info table needed by the call
296 to gc_1p should be the *same* as the
297 one for the call to f; the C-- optimiser
298 spots this sharing opportunity)
299
300 (b) No canned sequence for results of f
301 Note second info table
302 ...
303 (r1,r2,r3) = call f( x, y )
304 K:
305 Hp = Hp+8
306 if Hp > HpLim goto L
307 ...code for rhs...
308
309 L: call gc() -- Extra info table here
310 goto K
311
312 * generalHeapCheck: Anywhere else
313 e.g. entry to thunk
314 case branch *not* following eval,
315 or let-no-escape
316 Exactly the same as the previous case:
317
318 K: -- K needs an info table
319 Hp = Hp+8
320 if Hp > HpLim goto L
321 ...
322
323 L: call gc()
324 goto K
325 -}
326
327 --------------------------------------------------------------
328 -- A heap/stack check at a function or thunk entry point.
329
-- | Emit the heap (and stack) check at a function or thunk entry
-- point.  On overflow we tail-call the appropriate RTS GC entry and
-- then loop back to re-do the check.  See Note [Heap checks].
entryHeapCheck :: ClosureInfo
               -> Maybe LocalReg -- Function (closure environment)
               -> Int            -- Arity -- not same as len args b/c of voids
               -> [LocalReg]     -- Non-void args (empty for thunk)
               -> FCode ()
               -> FCode ()

entryHeapCheck cl_info nodeSet arity args code
  = do dflags <- getDynFlags
       let is_thunk = arity == 0
           -- is_fastf: does this function use the fast (register-based)
           -- calling convention?  ArgGen means a slow generic entry.
           is_fastf = case closureFunInfo cl_info of
                           Just (_, ArgGen _) -> False
                           _otherwise         -> True

           args' = map (CmmReg . CmmLocal) args
           -- node: the closure pointer itself; either the environment
           -- register, or the static closure's label for top-level
           -- closures with no environment.
           node = case nodeSet of
                      Just r  -> CmmReg (CmmLocal r)
                      Nothing -> CmmLit (CmmLabel $ staticClosureLabel cl_info)
           stg_gc_fun    = CmmReg (CmmGlobal GCFun)
           stg_gc_enter1 = CmmReg (CmmGlobal GCEnter1)

           {- Thunks:          jump stg_gc_enter_1

              Function (fast): call (NativeNode) stg_gc_fun(fun, args)

              Function (slow): R1 = fun
                               call (slow) stg_gc_fun(args)
              XXX: this is a bit naughty, we should really pass R1 as an
              argument and use a special calling convention.
           -}
           gc_call upd
               | is_thunk
                 = mkJump dflags stg_gc_enter1 [node] upd

               | is_fastf
                 = mkJump dflags stg_gc_fun (node : args') upd

               | otherwise
                 = mkAssign nodeReg node <*>
                   mkForeignJump dflags Slow stg_gc_fun args' upd

       updfr_sz <- getUpdFrameOff

       -- The GC call jumps back here so the check is performed again
       -- on re-entry (another thread may have consumed the headroom).
       loop_id <- newLabelC
       emitLabel loop_id
       heapCheck True (gc_call updfr_sz <*> mkBranch loop_id) code
376
377 {-
378 -- This code is slightly outdated now and we could easily keep the above
379 -- GC methods. However, there may be some performance gains to be made by
380 -- using more specialised GC entry points. Since the semi generic GCFun
381 -- entry needs to check the node and figure out what registers to save...
382 -- if we provided and used more specialised GC entry points then these
383 -- runtime decisions could be turned into compile time decisions.
384
385 args' = case fun of Just f -> f : args
386 Nothing -> args
387 arg_exprs = map (CmmReg . CmmLocal) args'
388 gc_call updfr_sz
389 | arity == 0 = mkJumpGC (CmmReg (CmmGlobal GCEnter1)) arg_exprs updfr_sz
390 | otherwise =
391 case gc_lbl args' of
392 Just _lbl -> panic "StgCmmHeap.entryHeapCheck: not finished"
393 -- mkJumpGC (CmmLit (CmmLabel (mkRtsCodeLabel lbl)))
394 -- arg_exprs updfr_sz
395 Nothing -> mkCall generic_gc (GC, GC) [] [] updfr_sz
396
397 gc_lbl :: [LocalReg] -> Maybe FastString
398 gc_lbl [reg]
399 | isGcPtrType ty = Just (sLit "stg_gc_unpt_r1") -- "stg_gc_fun_1p"
400 | isFloatType ty = case width of
401 W32 -> Just (sLit "stg_gc_f1")
402 W64 -> Just (sLit "stg_gc_d1")
403 _other -> Nothing
404 | width == wordWidth = Just (mkGcLabel "stg_gc_unbx_r1")
405 | width == W64 = Just (mkGcLabel "stg_gc_l1")
406 | otherwise = Nothing
407 where
408 ty = localRegType reg
409 width = typeWidth ty
410
411 gc_lbl regs = gc_lbl_ptrs (map (isGcPtrType . localRegType) regs)
412
413 gc_lbl_ptrs :: [Bool] -> Maybe FastString
414 -- JD: TEMPORARY -- UNTIL THESE FUNCTIONS EXIST...
415 --gc_lbl_ptrs [True,True] = Just (sLit "stg_gc_fun_2p")
416 --gc_lbl_ptrs [True,True,True] = Just (sLit "stg_gc_fun_3p")
417 gc_lbl_ptrs _ = Nothing
418 -}
419
420
421 -- ------------------------------------------------------------
422 -- A heap/stack check in a case alternative
423
424
425 -- If there are multiple alts and we need to GC, but don't have a
426 -- continuation already (the scrut was simple), then we should
427 -- pre-generate the continuation. (if there are multiple alts it is
428 -- always a canned GC point).
429
430 -- altHeapCheck:
431 -- If we have a return continuation,
432 -- then if it is a canned GC pattern,
433 -- then we do mkJumpReturnsTo
434 -- else we do a normal call to stg_gc_noregs
435 -- else if it is a canned GC pattern,
436 -- then generate the continuation and do mkCallReturnsTo
437 -- else we do a normal call to stg_gc_noregs
438
-- | Heap check in a case alternative, immediately following an eval.
-- With a canned GC entry point for the live registers we build a fresh
-- return continuation for it; otherwise fall back to the generic GC.
altHeapCheck :: [LocalReg] -> FCode a -> FCode a
altHeapCheck regs code
  = case cannedGCEntryPoint regs of
      Nothing -> genericGC code
      Just gc -> do
        dflags <- getDynFlags
        lret <- newLabelC
        -- copyin reloads the live regs from the return convention when
        -- the GC returns to lret; off is the stack space that needs.
        let (off, copyin) = copyInOflow dflags NativeReturn (Young lret) regs
        lcont <- newLabelC
        -- Out-of-line: return point that restores regs then rejoins.
        emitOutOfLine lret (copyin <*> mkBranch lcont)
        emitLabel lcont
        cannedGCReturnsTo False gc regs lret off code
451
-- | Heap check in a case alternative when a return continuation
-- (label + frame offset) already exists on the stack.
altHeapCheckReturnsTo :: [LocalReg] -> Label -> ByteOff -> FCode a -> FCode a
altHeapCheckReturnsTo regs lret off code =
    maybe (genericGC code)
          (\gc -> cannedGCReturnsTo True gc regs lret off code)
          (cannedGCEntryPoint regs)
457
-- | Emit a heap check whose failure branch invokes the canned GC
-- entry @gc@ and arranges for it to return to @lret@.  If the
-- continuation is already on the stack we jump; otherwise we call,
-- pushing the return address.
cannedGCReturnsTo :: Bool -> CmmExpr -> [LocalReg] -> Label -> ByteOff
                  -> FCode a
                  -> FCode a
cannedGCReturnsTo cont_on_stack gc regs lret off code = do
    dflags   <- getDynFlags
    updfr_sz <- getUpdFrameOff
    let live = map (CmmReg . CmmLocal) regs
        -- Note [stg_gc arguments]
        gc_call
          | cont_on_stack = mkJumpReturnsTo dflags gc GC live lret off updfr_sz
          | otherwise     = mkCallReturnsTo dflags gc GC live lret off updfr_sz (0,[])
    heapCheck False gc_call code
472
-- | Heap check whose failure branch calls the generic GC entry
-- (no registers saved), then retries the check from scratch.
genericGC :: FCode a -> FCode a
genericGC code = do
    updfr_sz <- getUpdFrameOff
    retry <- newLabelC
    emitLabel retry
    gc_call <- mkCall generic_gc (GC, GC) [] [] updfr_sz (0,[])
    heapCheck False (gc_call <*> mkBranch retry) code
480
-- | Pick the canned RTS GC entry point matching the live registers,
-- if one exists: none live, or exactly one live register of a shape
-- the RTS has a pre-built entry for (pointer, float, double, word,
-- or 64-bit int).  Otherwise 'Nothing' and the caller must use the
-- generic GC.
cannedGCEntryPoint :: [LocalReg] -> Maybe CmmExpr
cannedGCEntryPoint [] = Just (mkGcLabel "stg_gc_noregs")
cannedGCEntryPoint [reg]
  | isGcPtrType ty               = Just (mkGcLabel "stg_gc_unpt_r1")
  | isFloatType ty, width == W32 = Just (mkGcLabel "stg_gc_f1")
  | isFloatType ty, width == W64 = Just (mkGcLabel "stg_gc_d1")
  | isFloatType ty               = Nothing
  | width == wordWidth           = Just (mkGcLabel "stg_gc_unbx_r1")
  | width == W64                 = Just (mkGcLabel "stg_gc_l1")
  | otherwise                    = Nothing
  where
    ty    = localRegType reg
    width = typeWidth ty
cannedGCEntryPoint _ = Nothing
499
500 -- Note [stg_gc arguments]
501 -- It might seem that we could avoid passing the arguments to the
502 -- stg_gc function, because they are already in the right registers.
503 -- While this is usually the case, it isn't always. Sometimes the
504 -- code generator has cleverly avoided the eval in a case, e.g. in
505 -- ffi/should_run/4221.hs we found
506 --
507 -- case a_r1mb of z
508 -- FunPtr x y -> ...
509 --
510 -- where a_r1mb is bound to a top-level constructor, and is known to be
511 -- evaluated. The codegen just assigns x, y and z, and continues;
512 -- R1 is never assigned.
513 --
514 -- So we'll have to rely on optimisations to eliminate these
515 -- assignments where possible.
516
517
-- | The generic GC procedure; no params, no results.
-- Live registers are saved by the RTS entry itself, so this works for
-- any register set (cf. the canned entries in 'cannedGCEntryPoint').
generic_gc :: CmmExpr
generic_gc = mkGcLabel "stg_gc_noregs"
521
-- | Create a CLabel expression for calling a garbage collector entry
-- point named @s@ in the RTS package.
mkGcLabel :: String -> CmmExpr
mkGcLabel = CmmLit . CmmLabel . mkCmmCodeLabel rtsPackageId . fsLit
525
526 -------------------------------
-- | Wrap @code@ in a heap (and optionally stack) check for however
-- many words it allocates; @do_gc@ is the failure branch and is
-- expected to jump back to retry the check.
heapCheck :: Bool -> CmmAGraph -> FCode a -> FCode a
heapCheck checkStack do_gc code
  = getHeapUsage $ \ hpHw ->
    -- Emit heap checks, but be sure to do it lazily so
    -- that the conditionals on hpHw don't cause a black hole
    -- (hpHw is only known after code has been generated).
    do  { codeOnly $ do_checks checkStack hpHw do_gc
        ; tickyAllocHeap hpHw
        ; doGranAllocate hpHw
        ; setRealHp hpHw
        ; code }
537
-- | Emit the actual overflow tests: optional stack check, then the
-- Hp bump and heap check, with the GC code placed out of line.
do_checks :: Bool       -- Should we check the stack?
          -> WordOff    -- Heap headroom
          -> CmmAGraph  -- What to do on failure
          -> FCode ()
do_checks checkStack alloc do_gc = do
  gc_id <- newLabelC

  when checkStack $
     emit =<< mkCmmIfGoto sp_oflo gc_id

  -- Skip the Hp bump and check entirely when nothing is allocated.
  when (alloc /= 0) $ do
     emitAssign hpReg bump_hp
     emit =<< mkCmmIfThen hp_oflo (alloc_n <*> mkBranch gc_id)

  emitOutOfLine gc_id $
     do_gc -- this is expected to jump back somewhere

  -- Test for stack pointer exhaustion, then
  -- bump heap pointer, and test for heap exhaustion
  -- Note that we don't move the heap pointer unless the
  -- stack check succeeds.  Otherwise we might end up
  -- with slop at the end of the current block, which can
  -- confuse the LDV profiler.
  where
    alloc_lit = CmmLit (mkIntCLit (alloc*wORD_SIZE)) -- Bytes
    bump_hp   = cmmOffsetExprB (CmmReg hpReg) alloc_lit

    -- Sp overflow if (Sp - CmmHighStack < SpLim)
    -- CmmHighStackMark is a late-bound constant: the max stack usage,
    -- filled in once the stack layout is known.
    sp_oflo = CmmMachOp mo_wordULt
                  [CmmMachOp (MO_Sub (typeWidth (cmmRegType spReg)))
                             [CmmReg spReg, CmmLit CmmHighStackMark],
                   CmmReg spLimReg]

    -- Hp overflow if (Hp > HpLim)
    -- (Hp has been incremented by now)
    -- HpLim points to the LAST WORD of valid allocation space.
    hp_oflo = CmmMachOp mo_wordUGt
                  [CmmReg hpReg, CmmReg (CmmGlobal HpLim)]

    -- Record the failed request size in HpAlloc for the RTS.
    alloc_n = mkAssign (CmmGlobal HpAlloc) alloc_lit
578
579 {-
580
581 {- Unboxed tuple alternatives and let-no-escapes (the two most annoying
582 constructs to generate code for!) For unboxed tuple returns, there
583 are an arbitrary number of possibly unboxed return values, some of
584 which will be in registers, and the others will be on the stack. We
585 always organise the stack-resident fields into pointers &
586 non-pointers, and pass the number of each to the heap check code. -}
587
588 unbxTupleHeapCheck
589 :: [(Id, GlobalReg)] -- Live registers
590 -> WordOff -- no. of stack slots containing ptrs
591 -> WordOff -- no. of stack slots containing nonptrs
592 -> CmmAGraph -- code to insert in the failure path
593 -> FCode ()
594 -> FCode ()
595
596 unbxTupleHeapCheck regs ptrs nptrs fail_code code
597 -- We can't manage more than 255 pointers/non-pointers
598 -- in a generic heap check.
599 | ptrs > 255 || nptrs > 255 = panic "altHeapCheck"
600 | otherwise
601 = initHeapUsage $ \ hpHw -> do
602 { codeOnly $ do { do_checks 0 {- no stack check -} hpHw
603 full_fail_code rts_label
604 ; tickyAllocHeap hpHw }
605 ; setRealHp hpHw
606 ; code }
607 where
608 full_fail_code = fail_code `plusStmts` oneStmt assign_liveness
609 assign_liveness = CmmAssign (CmmGlobal (VanillaReg 9)) -- Ho ho ho!
610 (CmmLit (mkWordCLit liveness))
611 liveness = mkRegLiveness regs ptrs nptrs
612 rts_label = CmmLit (CmmLabel (mkRtsCodeLabel (sLit "stg_gc_ut")))
613
614
615 {- Old Gransim com -- I have no idea whether it still makes sense (SLPJ Sep07)
616 For GrAnSim the code for doing a heap check and doing a context switch
617 has been separated. Especially, the HEAP_CHK macro only performs a
618 heap check. THREAD_CONTEXT_SWITCH should be used for doing a context
619 switch. GRAN_FETCH_AND_RESCHEDULE must be put at the beginning of
620 every slow entry code in order to simulate the fetching of
621 closures. If fetching is necessary (i.e. current closure is not local)
622 then an automatic context switch is done. -}
623
624
625 When failing a check, we save a return address on the stack and
626 jump to a pre-compiled code fragment that saves the live registers
627 and returns to the scheduler.
628
629 The return address in most cases will be the beginning of the basic
630 block in which the check resides, since we need to perform the check
631 again on re-entry because someone else might have stolen the resource
632 in the meantime.
633
634 %************************************************************************
635 %* *
636 Generic Heap/Stack Checks - used in the RTS
637 %* *
638 %************************************************************************
639
640 \begin{code}
641 hpChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> FCode ()
642 hpChkGen bytes liveness reentry
643 = do_checks' bytes True assigns stg_gc_gen
644 where
645 assigns = mkStmts [
646 CmmAssign (CmmGlobal (VanillaReg 9)) liveness,
647 CmmAssign (CmmGlobal (VanillaReg 10)) reentry
648 ]
649
650 -- a heap check where R1 points to the closure to enter on return, and
651 -- we want to assign to Sp[0] on failure (used in AutoApply.cmm:BUILD_PAP).
652 hpChkNodePointsAssignSp0 :: CmmExpr -> CmmExpr -> FCode ()
653 hpChkNodePointsAssignSp0 bytes sp0
654 = do_checks' bytes True assign stg_gc_enter1
655 where assign = oneStmt (CmmStore (CmmReg spReg) sp0)
656
657 stg_gc_gen = CmmLit (CmmLabel (mkRtsCodeLabel (sLit "stg_gc_gen")))
658 \end{code}
659
660 -}