{-# LANGUAGE CPP #-}

-----------------------------------------------------------------------------
--
-- Stg to C--: heap management functions
--
-- (c) The University of Glasgow 2004-2006
--
-----------------------------------------------------------------------------

module StgCmmHeap (
        getVirtHp, setVirtHp, setRealHp,
        getHpRelOffset,

        entryHeapCheck, altHeapCheck, noEscapeHeapCheck, altHeapCheckReturnsTo,
        heapStackCheckGen,
        entryHeapCheck',

        mkStaticClosureFields, mkStaticClosure,

        allocDynClosure, allocDynClosureCmm, allocHeapClosure,
        emitSetDynHdr
    ) where

#include "HsVersions.h"

import StgSyn
import CLabel
import StgCmmLayout
import StgCmmUtils
import StgCmmMonad
import StgCmmProf (profDynAlloc, dynProfHdr, staticProfHdr)
import StgCmmTicky
import StgCmmClosure
import StgCmmEnv

import MkGraph

import Hoopl
import SMRep
import Cmm
import CmmUtils
import CostCentre
import IdInfo( CafInfo(..), mayHaveCafRefs )
import Id ( Id )
import Module
import DynFlags
import FastString( mkFastString, fsLit )

#if __GLASGOW_HASKELL__ >= 709
import Prelude hiding ((<*>))
#endif

import Control.Monad (when)
import Data.Maybe (isJust)

-----------------------------------------------------------
--      Initialise dynamic heap objects
-----------------------------------------------------------

allocDynClosure
        :: Maybe Id
        -> CmmInfoTable
        -> LambdaFormInfo
        -> CmmExpr              -- Cost Centre to stick in the object
        -> CmmExpr              -- Cost Centre to blame for this alloc
                                -- (usually the same; sometimes "OVERHEAD")

        -> [(NonVoid StgArg, VirtualHpOffset)]  -- Offsets from start of object
                                                -- ie Info ptr has offset zero.
                                                -- No void args in here
        -> FCode CmmExpr        -- returns Hp+n

allocDynClosureCmm
        :: Maybe Id -> CmmInfoTable -> LambdaFormInfo -> CmmExpr -> CmmExpr
        -> [(CmmExpr, ByteOff)]
        -> FCode CmmExpr        -- returns Hp+n

-- allocDynClosure allocates the thing in the heap,
-- and modifies the virtual Hp to account for this.

-- allocDynClosure returns an (Hp+8) CmmExpr, and hence the result is
-- only valid until Hp is changed.  The caller should assign the
-- result to a LocalReg if it is required to remain live.
--
-- The reason we don't assign it to a LocalReg here is that the caller
-- is often about to call regIdInfo, which immediately assigns the
-- result of allocDynClosure to a new temp in order to add the tag.
-- So by not generating a LocalReg here we avoid a common source of
-- new temporaries and save some compile time.  This can be quite
-- significant - see test T4801.
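--
-- As an illustrative sketch only (not code taken from an actual call site):
-- a caller that needs the address to stay valid across later allocation can
-- pin it in a temporary, e.g.
--
--      hp_rel <- allocDynClosure mb_id info_tbl lf_info use_cc blame_cc args
--      reg    <- assignTemp hp_rel   -- assignTemp from StgCmmUtils
--
-- after which CmmReg (CmmLocal reg) remains usable once Hp has moved on.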


allocDynClosure mb_id info_tbl lf_info use_cc _blame_cc args_w_offsets = do
  let (args, offsets) = unzip args_w_offsets
  cmm_args <- mapM getArgAmode args     -- No void args
  allocDynClosureCmm mb_id info_tbl lf_info
                     use_cc _blame_cc (zip cmm_args offsets)


allocDynClosureCmm mb_id info_tbl lf_info use_cc _blame_cc amodes_w_offsets = do
  -- SAY WHAT WE ARE ABOUT TO DO
  let rep = cit_rep info_tbl
  tickyDynAlloc mb_id rep lf_info
  let info_ptr = CmmLit (CmmLabel (cit_lbl info_tbl))
  allocHeapClosure rep info_ptr use_cc amodes_w_offsets


-- | Low-level heap object allocation.
allocHeapClosure
  :: SMRep                            -- ^ representation of the object
  -> CmmExpr                          -- ^ info pointer
  -> CmmExpr                          -- ^ cost centre
  -> [(CmmExpr,ByteOff)]              -- ^ payload
  -> FCode CmmExpr                    -- ^ returns the address of the object
allocHeapClosure rep info_ptr use_cc payload = do
  profDynAlloc rep use_cc

  virt_hp <- getVirtHp

  -- Find the offset of the info-ptr word
  let info_offset = virt_hp + 1
            -- info_offset is the VirtualHpOffset of the first
            -- word of the new object
            -- Remember, virtHp points to last allocated word,
            -- ie 1 *before* the info-ptr word of new object.
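            --
            -- A small worked example (illustrative numbers only): if virtHp
            -- is currently 3, the new object's info pointer is placed at
            -- virtual word offset 4, and once the object's N words are laid
            -- out we bump virtHp to 3 + N below.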

  base <- getHpRelOffset info_offset
  emitComment $ mkFastString "allocHeapClosure"
  emitSetDynHdr base info_ptr use_cc

  -- Fill in the fields
  hpStore base payload

  -- Bump the virtual heap pointer
  dflags <- getDynFlags
  setVirtHp (virt_hp + heapClosureSizeW dflags rep)

  return base


emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> FCode ()
emitSetDynHdr base info_ptr ccs
  = do dflags <- getDynFlags
       hpStore base (zip (header dflags) [0, wORD_SIZE dflags ..])
  where
    header :: DynFlags -> [CmmExpr]
    header dflags = [info_ptr] ++ dynProfHdr dflags ccs
        -- ToDo: Parallel stuff
        -- No ticky header

-- Store the item (expr,off) in base[off]
hpStore :: CmmExpr -> [(CmmExpr, ByteOff)] -> FCode ()
hpStore base vals = do
  dflags <- getDynFlags
  sequence_ $
    [ emitStore (cmmOffsetB dflags base off) val | (val,off) <- vals ]

-----------------------------------------------------------
--      Layout of static closures
-----------------------------------------------------------

-- Make a static closure, adding on any extra padding needed for CAFs,
-- and adding a static link field if necessary.

mkStaticClosureFields
        :: DynFlags
        -> CmmInfoTable
        -> CostCentreStack
        -> CafInfo
        -> [CmmLit]             -- Payload
        -> [CmmLit]             -- The full closure
mkStaticClosureFields dflags info_tbl ccs caf_refs payload
  = mkStaticClosure dflags info_lbl ccs payload padding
        static_link_field saved_info_field
  where
    info_lbl = cit_lbl info_tbl

    -- CAFs must have consistent layout, regardless of whether they
    -- are actually updatable or not.  The layout of a CAF is:
    --
    --        3 saved_info
    --        2 static_link
    --        1 indirectee
    --        0 info ptr
    --
    -- the static_link and saved_info fields must always be in the
    -- same place.  So we use isThunkRep rather than closureUpdReqd
    -- here:

    is_caf = isThunkRep (cit_rep info_tbl)

    padding
        | is_caf && null payload = [mkIntCLit dflags 0]
        | otherwise = []

    static_link_field
        | is_caf || staticClosureNeedsLink (mayHaveCafRefs caf_refs) info_tbl
        = [static_link_value]
        | otherwise
        = []

    saved_info_field
        | is_caf     = [mkIntCLit dflags 0]
        | otherwise  = []

        -- For a static constructor which has NoCafRefs, we set the
        -- static link field to a non-zero value so the garbage
        -- collector will ignore it.
    static_link_value
        | mayHaveCafRefs caf_refs  = mkIntCLit dflags 0
        | otherwise                = mkIntCLit dflags 1  -- No CAF refs


mkStaticClosure :: DynFlags -> CLabel -> CostCentreStack -> [CmmLit]
  -> [CmmLit] -> [CmmLit] -> [CmmLit] -> [CmmLit]
mkStaticClosure dflags info_lbl ccs payload padding static_link_field saved_info_field
  =  [CmmLabel info_lbl]
  ++ staticProfHdr dflags ccs
  ++ concatMap (padLitToWord dflags) payload
  ++ padding
  ++ static_link_field
  ++ saved_info_field

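-- For example (an illustrative sketch, ignoring any profiling header words):
-- a CAF with an empty payload gets a zero word of padding followed by its
-- static link and saved_info words, so the emitted literals have the shape
--
--      [CmmLabel info_lbl, 0, static_link, 0]
--
-- whereas an ordinary static constructor is just the info label followed by
-- its (word-padded) payload and, when a static link is needed, a static link
-- word.
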
-- JD: Simon had elided this padding, but without it the C back end asserts
-- failure. Maybe it's a bad assertion, and this padding is indeed unnecessary?
padLitToWord :: DynFlags -> CmmLit -> [CmmLit]
padLitToWord dflags lit = lit : padding pad_length
  where width = typeWidth (cmmLitType dflags lit)
        pad_length = wORD_SIZE dflags - widthInBytes width :: Int

        padding n | n <= 0         = []
                  | n `rem` 2 /= 0 = CmmInt 0 W8  : padding (n-1)
                  | n `rem` 4 /= 0 = CmmInt 0 W16 : padding (n-2)
                  | n `rem` 8 /= 0 = CmmInt 0 W32 : padding (n-4)
                  | otherwise      = CmmInt 0 W64 : padding (n-8)
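
-- A worked example of the padding (illustrative, assuming 8-byte words): a
-- W32 literal is followed by a single zero CmmInt of width W32, while a W8
-- literal is followed by zero literals of widths W8, W16 and W32, filling
-- the remaining 7 bytes of its word.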

-----------------------------------------------------------
--              Heap overflow checking
-----------------------------------------------------------

{- Note [Heap checks]
   ~~~~~~~~~~~~~~~~~~
Heap checks come in various forms.  We provide the following entry
points to the runtime system, all of which use the native C-- entry
convention.

  * gc() performs garbage collection and returns
    nothing to its caller

  * A series of canned entry points like
        r = gc_1p( r )
    where r is a pointer.  This performs gc, and
    then returns its argument r to its caller.

  * A series of canned entry points like
        gcfun_2p( f, x, y )
    where f is a function closure of arity 2
    This performs garbage collection, keeping alive the
    three argument ptrs, and then tail-calls f(x,y)

These are used in the following circumstances

* entryHeapCheck: Function entry
    (a) With a canned GC entry sequence
        f( f_clo, x:ptr, y:ptr ) {
             Hp = Hp+8
             if Hp > HpLim goto L
             ...
          L: HpAlloc = 8
             jump gcfun_2p( f_clo, x, y ) }
     Note the tail call to the garbage collector;
     it should do no register shuffling

    (b) No canned sequence
        f( f_clo, x:ptr, y:ptr, ...etc... ) {
          T: Hp = Hp+8
             if Hp > HpLim goto L
             ...
          L: HpAlloc = 8
             call gc()  -- Needs an info table
             goto T }

* altHeapCheck: Immediately following an eval
  Started as
        case f x y of r { (p,q) -> rhs }
  (a) With a canned sequence for the results of f
       (which is the very common case since
       all boxed cases return just one pointer
         ...
        r = f( x, y )
     K:      -- K needs an info table
        Hp = Hp+8
        if Hp > HpLim goto L
        ...code for rhs...

     L: r = gc_1p( r )
        goto K }

     Here, the info table needed by the call
     to gc_1p should be the *same* as the
     one for the call to f; the C-- optimiser
     spots this sharing opportunity)

   (b) No canned sequence for results of f
       Note second info table
          ...
          (r1,r2,r3) = call f( x, y )
        K:
          Hp = Hp+8
          if Hp > HpLim goto L
          ...code for rhs...

        L: call gc()    -- Extra info table here
           goto K

* generalHeapCheck: Anywhere else
  e.g. entry to thunk
       case branch *not* following eval,
       or let-no-escape
  Exactly the same as the previous case:

        K:      -- K needs an info table
          Hp = Hp+8
          if Hp > HpLim goto L
          ...

        L: call gc()
           goto K
-}

--------------------------------------------------------------
-- A heap/stack check at a function or thunk entry point.

entryHeapCheck :: ClosureInfo
               -> Maybe LocalReg -- Function (closure environment)
               -> Int            -- Arity -- not same as len args b/c of voids
               -> [LocalReg]     -- Non-void args (empty for thunk)
               -> FCode ()
               -> FCode ()

entryHeapCheck cl_info nodeSet arity args code
  = entryHeapCheck' is_fastf node arity args code
  where
    node = case nodeSet of
              Just r  -> CmmReg (CmmLocal r)
              Nothing -> CmmLit (CmmLabel $ staticClosureLabel cl_info)

    is_fastf = case closureFunInfo cl_info of
                 Just (_, ArgGen _) -> False
                 _otherwise         -> True

-- | lower-level version for CmmParse
entryHeapCheck' :: Bool           -- is a known function pattern
                -> CmmExpr        -- expression for the closure pointer
                -> Int            -- Arity -- not same as len args b/c of voids
                -> [LocalReg]     -- Non-void args (empty for thunk)
                -> FCode ()
                -> FCode ()
entryHeapCheck' is_fastf node arity args code
  = do dflags <- getDynFlags
       let is_thunk = arity == 0

           args' = map (CmmReg . CmmLocal) args
           stg_gc_fun    = CmmReg (CmmGlobal GCFun)
           stg_gc_enter1 = CmmReg (CmmGlobal GCEnter1)

           {- Thunks:          jump stg_gc_enter_1

              Function (fast): call (NativeNode) stg_gc_fun(fun, args)

              Function (slow): call (slow) stg_gc_fun(fun, args)
           -}
           gc_call upd
               | is_thunk
                 = mkJump dflags NativeNodeCall stg_gc_enter1 [node] upd

               | is_fastf
                 = mkJump dflags NativeNodeCall stg_gc_fun (node : args') upd

               | otherwise
                 = mkJump dflags Slow stg_gc_fun (node : args') upd

       updfr_sz <- getUpdFrameOff

       loop_id <- newLabelC
       emitLabel loop_id
       heapCheck True True (gc_call updfr_sz <*> mkBranch loop_id) code

-- ------------------------------------------------------------
-- A heap/stack check in a case alternative


-- If there are multiple alts and we need to GC, but don't have a
-- continuation already (the scrut was simple), then we should
-- pre-generate the continuation.  (if there are multiple alts it is
-- always a canned GC point).

-- altHeapCheck:
-- If we have a return continuation,
--   then if it is a canned GC pattern,
--           then we do mkJumpReturnsTo
--           else we do a normal call to stg_gc_noregs
--   else if it is a canned GC pattern,
--           then generate the continuation and do mkCallReturnsTo
--           else we do a normal call to stg_gc_noregs

altHeapCheck :: [LocalReg] -> FCode a -> FCode a
altHeapCheck regs code = altOrNoEscapeHeapCheck False regs code

altOrNoEscapeHeapCheck :: Bool -> [LocalReg] -> FCode a -> FCode a
altOrNoEscapeHeapCheck checkYield regs code = do
  dflags <- getDynFlags
  case cannedGCEntryPoint dflags regs of
      Nothing -> genericGC checkYield code
      Just gc -> do
        lret <- newLabelC
        let (off, _, copyin) = copyInOflow dflags NativeReturn (Young lret) regs []
        lcont <- newLabelC
        emitOutOfLine lret (copyin <*> mkBranch lcont)
        emitLabel lcont
        cannedGCReturnsTo checkYield False gc regs lret off code

altHeapCheckReturnsTo :: [LocalReg] -> Label -> ByteOff -> FCode a -> FCode a
altHeapCheckReturnsTo regs lret off code
  = do dflags <- getDynFlags
       case cannedGCEntryPoint dflags regs of
           Nothing -> genericGC False code
           Just gc -> cannedGCReturnsTo False True gc regs lret off code

-- noEscapeHeapCheck is implemented identically to altHeapCheck (which
-- is more efficient), but cannot be optimized away in the non-allocating
-- case because it may occur in a loop
noEscapeHeapCheck :: [LocalReg] -> FCode a -> FCode a
noEscapeHeapCheck regs code = altOrNoEscapeHeapCheck True regs code

cannedGCReturnsTo :: Bool -> Bool -> CmmExpr -> [LocalReg] -> Label -> ByteOff
                  -> FCode a
                  -> FCode a
cannedGCReturnsTo checkYield cont_on_stack gc regs lret off code
  = do dflags <- getDynFlags
       updfr_sz <- getUpdFrameOff
       heapCheck False checkYield (gc_call dflags gc updfr_sz) code
  where
    reg_exprs = map (CmmReg . CmmLocal) regs
      -- Note [stg_gc arguments]

      -- NB. we use the NativeReturn convention for passing arguments
      -- to the canned heap-check routines, because we are in a case
      -- alternative and hence the [LocalReg] was passed to us in the
      -- NativeReturn convention.
    gc_call dflags label sp
      | cont_on_stack
      = mkJumpReturnsTo dflags label NativeReturn reg_exprs lret off sp
      | otherwise
      = mkCallReturnsTo dflags label NativeReturn reg_exprs lret off sp []

genericGC :: Bool -> FCode a -> FCode a
genericGC checkYield code
  = do updfr_sz <- getUpdFrameOff
       lretry <- newLabelC
       emitLabel lretry
       call <- mkCall generic_gc (GC, GC) [] [] updfr_sz []
       heapCheck False checkYield (call <*> mkBranch lretry) code

cannedGCEntryPoint :: DynFlags -> [LocalReg] -> Maybe CmmExpr
cannedGCEntryPoint dflags regs
  = case map localRegType regs of
      []  -> Just (mkGcLabel "stg_gc_noregs")
      [ty]
          | isGcPtrType ty -> Just (mkGcLabel "stg_gc_unpt_r1")
          | isFloatType ty -> case width of
                                  W32 -> Just (mkGcLabel "stg_gc_f1")
                                  W64 -> Just (mkGcLabel "stg_gc_d1")
                                  _   -> Nothing

          | width == wordWidth dflags -> Just (mkGcLabel "stg_gc_unbx_r1")
          | width == W64              -> Just (mkGcLabel "stg_gc_l1")
          | otherwise                 -> Nothing
          where
              width = typeWidth ty
      [ty1,ty2]
          |  isGcPtrType ty1
          && isGcPtrType ty2 -> Just (mkGcLabel "stg_gc_pp")
      [ty1,ty2,ty3]
          |  isGcPtrType ty1
          && isGcPtrType ty2
          && isGcPtrType ty3 -> Just (mkGcLabel "stg_gc_ppp")
      [ty1,ty2,ty3,ty4]
          |  isGcPtrType ty1
          && isGcPtrType ty2
          && isGcPtrType ty3
          && isGcPtrType ty4 -> Just (mkGcLabel "stg_gc_pppp")
      _otherwise -> Nothing

-- Note [stg_gc arguments]
-- It might seem that we could avoid passing the arguments to the
-- stg_gc function, because they are already in the right registers.
-- While this is usually the case, it isn't always.  Sometimes the
-- code generator has cleverly avoided the eval in a case, e.g. in
-- ffi/should_run/4221.hs we found
--
--   case a_r1mb of z
--     FunPtr x y -> ...
--
-- where a_r1mb is bound to a top-level constructor, and is known to be
-- evaluated.  The codegen just assigns x, y and z, and continues;
-- R1 is never assigned.
--
-- So we'll have to rely on optimisations to eliminate these
-- assignments where possible.


-- | The generic GC procedure; no params, no results
generic_gc :: CmmExpr
generic_gc = mkGcLabel "stg_gc_noregs"

-- | Create a CLabel for calling a garbage collector entry point
mkGcLabel :: String -> CmmExpr
mkGcLabel s = CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageKey (fsLit s)))

-------------------------------
heapCheck :: Bool -> Bool -> CmmAGraph -> FCode a -> FCode a
heapCheck checkStack checkYield do_gc code
  = getHeapUsage $ \ hpHw ->
    -- Emit heap checks, but be sure to do it lazily so
    -- that the conditionals on hpHw don't cause a black hole
    do  { dflags <- getDynFlags
        ; let mb_alloc_bytes
                 | hpHw > 0  = Just (mkIntExpr dflags (hpHw * (wORD_SIZE dflags)))
                 | otherwise = Nothing
              stk_hwm | checkStack = Just (CmmLit CmmHighStackMark)
                      | otherwise  = Nothing
        ; codeOnly $ do_checks stk_hwm checkYield mb_alloc_bytes do_gc
        ; tickyAllocHeap True hpHw
        ; setRealHp hpHw
        ; code }

heapStackCheckGen :: Maybe CmmExpr -> Maybe CmmExpr -> FCode ()
heapStackCheckGen stk_hwm mb_bytes
  = do updfr_sz <- getUpdFrameOff
       lretry <- newLabelC
       emitLabel lretry
       call <- mkCall generic_gc (GC, GC) [] [] updfr_sz []
       do_checks stk_hwm False mb_bytes (call <*> mkBranch lretry)

-- Note [Single stack check]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~
-- When compiling a function we can determine how much stack space it
-- will use. We therefore need to perform only a single stack check at
-- the beginning of a function to see if we have enough stack space.
--
-- The check boils down to comparing Sp-N with SpLim, where N is the
-- amount of stack space needed (see Note [Stack usage] below). *BUT*
-- at this stage of the pipeline we are not supposed to refer to Sp
-- itself, because the stack is not yet manifest, so we don't quite
-- know where Sp is pointing.

-- So instead of referring directly to Sp - as we used to do in the
-- past - the code generator uses (old + 0) in the stack check. That
-- is the address of the first word of the old area, so if we add N
-- we'll get the address of the highest used word.
--
-- This makes the check robust. For example, while we need to perform
-- only one stack check for each function, we could in theory place
-- more stack checks later in the function. They would be redundant,
-- but not incorrect (in the sense that they should not change program
-- behaviour). We need to make sure, however, that a stack check
-- inserted after incrementing the stack pointer checks for a
-- correspondingly smaller amount of stack space. This would not be the
-- case if the code generator produced direct references to Sp. By
-- referencing (old + 0) we make sure that we always check for the
-- correct amount of stack: when converting (old + 0) to Sp the stack
-- layout phase takes into account changes already made to the stack
-- pointer. The idea for this change came from observations made while
-- debugging #8275.
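--
-- As a concrete sketch of the emitted check (illustrative only; see the
-- sp_oflo expression in do_checks below), the generated C-- has the shape
--
--      if ((old + 0) - <bytes of stack needed> < SpLim) goto gc_label;
--
-- where (old + 0) is rewritten to an Sp-relative address once the stack
-- layout has been decided, so the comparison stays correct wherever the
-- check ends up.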

-- Note [Stack usage]
-- ~~~~~~~~~~~~~~~~~~
-- At the moment we convert from STG to Cmm we don't know N, the
-- number of bytes of stack that the function will use, so we use a
-- special late-bound CmmLit, namely
--       CmmHighStackMark
-- to stand for the number of bytes needed. When the stack is made
-- manifest, the number of bytes needed is calculated, and used to
-- replace occurrences of CmmHighStackMark.
--
-- The (Maybe CmmExpr) passed to do_checks is usually
--     Just (CmmLit CmmHighStackMark)
-- but can also (in certain hand-written RTS functions) be
--     Just (CmmLit 8) or some other fixed value
-- If it is Nothing, we don't generate a stack check at all.
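--
-- As an illustrative example (not a quote of any particular call site), a
-- typical use is
--
--      heapStackCheckGen (Just (CmmLit CmmHighStackMark)) (Just alloc_bytes)
--
-- where alloc_bytes is a hypothetical heap-headroom expression; the
-- stack-layout pass later substitutes the real frame size for
-- CmmHighStackMark.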

do_checks :: Maybe CmmExpr    -- Should we check the stack?
                              -- See Note [Stack usage]
          -> Bool             -- Should we check for preemption?
          -> Maybe CmmExpr    -- Heap headroom (bytes)
          -> CmmAGraph        -- What to do on failure
          -> FCode ()
do_checks mb_stk_hwm checkYield mb_alloc_lit do_gc = do
  dflags <- getDynFlags
  gc_id <- newLabelC

  let
    Just alloc_lit = mb_alloc_lit

    bump_hp   = cmmOffsetExprB dflags (CmmReg hpReg) alloc_lit

    -- Sp overflow if ((old + 0) - CmmHighStack < SpLim)
    -- At the beginning of a function old + 0 = Sp
    -- See Note [Single stack check]
    sp_oflo sp_hwm =
         CmmMachOp (mo_wordULt dflags)
                  [CmmMachOp (MO_Sub (typeWidth (cmmRegType dflags spReg)))
                             [CmmStackSlot Old 0, sp_hwm],
                   CmmReg spLimReg]

    -- Hp overflow if (Hp > HpLim)
    -- (Hp has been incremented by now)
    -- HpLim points to the LAST WORD of valid allocation space.
    hp_oflo = CmmMachOp (mo_wordUGt dflags)
                        [CmmReg hpReg, CmmReg (CmmGlobal HpLim)]

    alloc_n = mkAssign (CmmGlobal HpAlloc) alloc_lit

  case mb_stk_hwm of
    Nothing -> return ()
    Just stk_hwm -> tickyStackCheck >> (emit =<< mkCmmIfGoto (sp_oflo stk_hwm) gc_id)

  -- Emit new label that might potentially be a header
  -- of a self-recursive tail call.
  -- See Note [Self-recursive loop header].
  self_loop_info <- getSelfLoop
  case self_loop_info of
    Just (_, loop_header_id, _)
        | checkYield && isJust mb_stk_hwm -> emitLabel loop_header_id
    _otherwise -> return ()

  if (isJust mb_alloc_lit)
    then do
      tickyHeapCheck
      emitAssign hpReg bump_hp
      emit =<< mkCmmIfThen hp_oflo (alloc_n <*> mkBranch gc_id)
    else do
      when (checkYield && not (gopt Opt_OmitYields dflags)) $ do
         -- Yielding if HpLim == 0
         let yielding = CmmMachOp (mo_wordEq dflags)
                                  [CmmReg (CmmGlobal HpLim),
                                   CmmLit (zeroCLit dflags)]
         emit =<< mkCmmIfGoto yielding gc_id

  emitOutOfLine gc_id $
     do_gc -- this is expected to jump back somewhere

                -- Test for stack pointer exhaustion, then
                -- bump heap pointer, and test for heap exhaustion
                -- Note that we don't move the heap pointer unless the
                -- stack check succeeds.  Otherwise we might end up
                -- with slop at the end of the current block, which can
                -- confuse the LDV profiler.

-- Note [Self-recursive loop header]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--
-- A self-recursive loop header is required by the loopification optimization
-- (see Note [Self-recursive tail calls] in StgCmmExpr). We emit it if:
--
--  1. There is information about a self-loop in the FCode environment. We
--     don't check the binder (first component of the self_loop_info) because
--     we are certain that if the self-loop info is present then we are
--     compiling the binder body. Reason: the only possible way to get here
--     with the self_loop_info present is from closureCodeBody.
--
--  2. checkYield && isJust mb_stk_hwm. checkYield tells us that it is possible
--     to preempt the heap check (see #367 for motivation behind this check). It
--     is True for heap checks placed at the entry to a function and
--     let-no-escape heap checks but False for other heap checks (eg. in case
--     alternatives or created from hand-written high-level Cmm). The second
--     check (isJust mb_stk_hwm) is true for heap checks at the entry to a
--     function and some heap checks created in hand-written Cmm; otherwise
--     mb_stk_hwm is Nothing. In other words, the only situation in which both
--     conditions are true is when compiling the stack and heap checks at the
--     entry to a function. This is the only situation in which we want to
--     emit a self-loop label.