New stack layout algorithm
compiler/codeGen/StgCmmLayout.hs
1 -----------------------------------------------------------------------------
2 --
3 -- Building info tables.
4 --
5 -- (c) The University of Glasgow 2004-2006
6 --
7 -----------------------------------------------------------------------------
8
9 {-# OPTIONS -fno-warn-tabs #-}
10 -- The above warning suppression flag is a temporary kludge.
11 -- While working on this module you are encouraged to remove it and
12 -- detab the module (please do the detabbing in a separate patch). See
13 -- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
14 -- for details
15
16 module StgCmmLayout (
17 mkArgDescr,
18 emitCall, emitReturn, adjustHpBackwards,
19
20 emitClosureProcAndInfoTable,
21 emitClosureAndInfoTable,
22
23 slowCall, directCall,
24
25 mkVirtHeapOffsets, mkVirtConstrOffsets, getHpRelOffset, hpRel,
26
27 stdInfoTableSizeB,
28 entryCode, closureInfoPtr,
29 getConstrTag,
30 cmmGetClosureType,
31 infoTable, infoTableClosureType,
32 infoTablePtrs, infoTableNonPtrs,
33 funInfoTable
34 ) where
35
36
37 #include "HsVersions.h"
38
39 import StgCmmClosure
40 import StgCmmEnv
41 import StgCmmTicky
42 import StgCmmMonad
43 import StgCmmUtils
44 import StgCmmProf
45
46 import MkGraph
47 import SMRep
48 import Cmm
49 import CmmUtils
50 import CLabel
51 import StgSyn
52 import Id
53 import Name
54 import TyCon ( PrimRep(..) )
55 import BasicTypes ( Arity )
56 import DynFlags
57 import StaticFlags
58 import Module
59
60 import Constants
61 import Util
62 import Data.List
63 import Outputable
64 import FastString ( mkFastString, FastString, fsLit )
65
66 ------------------------------------------------------------------------
67 -- Call and return sequences
68 ------------------------------------------------------------------------
69
70 emitReturn :: [CmmExpr] -> FCode ()
71 -- Return multiple values to the sequel
72 --
73 -- If the sequel is Return
74 -- return (x,y)
75 -- If the sequel is AssignTo [p,q]
76 -- p=x; q=y;
77 emitReturn results
78 = do { sequel <- getSequel;
79 ; updfr_off <- getUpdFrameOff
80 ; emitComment $ mkFastString ("emitReturn: " ++ show sequel)
81 ; case sequel of
82 Return _ ->
83 do { adjustHpBackwards
84 ; emit (mkReturnSimple results updfr_off) }
85 AssignTo regs adjust ->
86 do { if adjust then adjustHpBackwards else return ()
87 ; emitMultiAssign regs results }
88 }
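-- As a quick illustration of the two cases above (x, y, p and q are purely
-- hypothetical names; this is only a sketch of the code that gets emitted):
--
--   emitReturn [x,y]  with sequel  Return _          ~~>  retract Hp if needed, then return (x,y) to the caller
--   emitReturn [x,y]  with sequel  AssignTo [p,q] _  ~~>  p := x; q := y   (falling through to the continuation)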
89
90 emitCall :: (Convention, Convention) -> CmmExpr -> [CmmExpr] -> FCode ()
91 -- (emitCall convs fun args) makes a call to the entry-code of 'fun',
92 -- passing 'args', and returning the results to the current sequel
93 emitCall convs@(callConv, _) fun args
94 = do { adjustHpBackwards
95 ; sequel <- getSequel
96 ; updfr_off <- getUpdFrameOff
97 ; emitComment $ mkFastString ("emitCall: " ++ show sequel)
98 ; case sequel of
99 Return _ ->
100 emit $ mkForeignJump callConv fun args updfr_off
101 AssignTo res_regs _ ->
102 emit =<< mkCall fun convs res_regs args updfr_off (0,[])
103 }
104
105 emitCallWithExtraStack
106 :: (Convention, Convention) -> CmmExpr -> [CmmExpr]
107 -> (ByteOff, [(CmmExpr,ByteOff)]) -> FCode ()
108 -- Like emitCall, but with a few extra words of stack data to be pushed
109 -- as part of the call; see Note [over-saturated calls] for why.
110 emitCallWithExtraStack convs@(callConv, _) fun args extra_stack
111 = do { adjustHpBackwards
112 ; sequel <- getSequel
113 ; updfr_off <- getUpdFrameOff
114 ; emitComment $ mkFastString ("emitCallWithExtraStack: " ++ show sequel)
115 ; case sequel of
116 Return _ ->
117 emit $ mkForeignJumpExtra callConv fun args updfr_off extra_stack
118 AssignTo res_regs _ -> do
119 emit =<< mkCall fun convs res_regs args updfr_off extra_stack
120 }
121
122
123
124 adjustHpBackwards :: FCode ()
125 -- This function adjusts the heap pointer just before a tail call or
126 -- return. At a call or return, the virtual heap pointer may be less
127 -- than the real Hp, because the latter was advanced to deal with
128 -- the worst-case branch of the code, and we may be in a better-case
129 -- branch. In that case, move the real Hp *back* and retract some
130 -- ticky allocation count.
131 --
132 -- It *does not* deal with high-water-mark adjustment.
133 -- That's done by functions which allocate heap.
134 adjustHpBackwards
135 = do { hp_usg <- getHpUsage
136 ; let rHp = realHp hp_usg
137 vHp = virtHp hp_usg
138 adjust_words = vHp - rHp
139 ; new_hp <- getHpRelOffset vHp
140
141 ; emit (if adjust_words == 0
142 then mkNop
143 else mkAssign hpReg new_hp) -- Generates nothing when vHp==rHp
144
145 ; tickyAllocHeap adjust_words -- ...ditto
146
147 ; setRealHp vHp
148 }
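-- A small worked example (the numbers are made up): suppose the code above
-- this point allocated for a worst-case branch, so realHp = 26, but we are in
-- a branch where virtHp = 20.  Then adjust_words = -6, so we emit
--   Hp = Hp - 6*wORD_SIZE        (via getHpRelOffset 20)
-- and retract 6 words from the ticky allocation count before the call/return.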
149
150
151 -------------------------------------------------------------------------
152 -- Making calls: directCall and slowCall
153 -------------------------------------------------------------------------
154
155 -- General plan is:
156 -- - we'll make *one* fast call, either to the function itself
157 -- (directCall) or to stg_ap_<pat>_fast (slowCall)
158 -- Any left-over arguments will be pushed on the stack,
159 --
160 -- e.g. Sp[old+8] = arg1
161 -- Sp[old+16] = arg2
162 -- Sp[old+32] = stg_ap_pp_info
163 -- R2 = arg3
164 -- R3 = arg4
165 -- call f() return to Nothing updfr_off: 32
166
167
168 directCall :: CLabel -> Arity -> [StgArg] -> FCode ()
169 -- (directCall f n args)
170 -- calls f(arg1, ..., argn), and applies the result to the remaining args
171 -- The function f has arity n, and there are guaranteed to be at least n args
172 -- Both arity and args include void args
173 directCall lbl arity stg_args
174 = do { cmm_args <- getNonVoidArgAmodes stg_args
175 ; direct_call "directCall" lbl arity cmm_args (argsReps stg_args) }
176
177 slowCall :: CmmExpr -> [StgArg] -> FCode ()
178 -- (slowCall fun args) applies fun to args, returning the results to Sequel
179 slowCall fun stg_args
180 = do { dflags <- getDynFlags
181 ; cmm_args <- getNonVoidArgAmodes stg_args
182 ; let platform = targetPlatform dflags
183 ; call <- getCode $ direct_call "slow_call"
184 (mkRtsApFastLabel rts_fun) arity cmm_args reps
185 ; emitComment $ mkFastString ("slow_call for " ++
186 showSDoc (pprPlatform platform fun) ++
187 " with pat " ++ showSDoc (ftext rts_fun))
188 ; emit (mkAssign nodeReg fun <*> call)
189 }
190 where
191 reps = argsReps stg_args
192 (rts_fun, arity) = slowCallPattern reps
193
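-- For example, a slow call to an unknown function with two pointer arguments
-- (fun, p and q are just illustrative names),
--   slowCall fun [p, q]
-- picks the pattern ("stg_ap_pp", 2) via slowCallPattern, so roughly we emit
--   R1 = fun;  jump stg_ap_pp_fast(p, q)
-- with any further arguments handled as in Note [over-saturated calls] below.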
194 --------------
195 direct_call :: String -> CLabel -> Arity -> [CmmExpr] -> [ArgRep] -> FCode ()
196 -- NB1: (length args) may be less than (length reps), because
197 -- the args exclude the void ones
198 -- NB2: 'arity' refers to the *reps*
199 direct_call caller lbl arity args reps
200 | debugIsOn && arity > length reps -- Too few args
201 = do -- Caller should ensure that there are enough args!
202 dflags <- getDynFlags
203 let platform = targetPlatform dflags
204 pprPanic "direct_call" (text caller <+> ppr arity
205 <+> pprPlatform platform lbl <+> ppr (length reps)
206 <+> pprPlatform platform args <+> ppr reps )
207
208
209 | null rest_reps -- Precisely the right number of arguments
210 = emitCall (NativeDirectCall, NativeReturn) target args
211
212 | otherwise -- Note [over-saturated calls]
213 = ASSERT( arity == length initial_reps )
214 emitCallWithExtraStack (NativeDirectCall, NativeReturn)
215 target fast_args (mkStkOffsets stack_args)
216 where
217 target = CmmLit (CmmLabel lbl)
218 (initial_reps, rest_reps) = splitAt arity reps
219 arg_arity = count isNonV initial_reps
220 (fast_args, rest_args) = splitAt arg_arity args
221 stack_args = slowArgs (zip rest_reps rest_args)
222
223
224 {-
225 Note [over-saturated calls]
226
227 The natural thing to do for an over-saturated call would be to call
228 the function with the correct number of arguments, and then apply the
229 remaining arguments to the value returned, e.g.
230
231 f a b c d (where f has arity 2)
232 -->
233 r = call f(a,b)
234 call r(c,d)
235
236 but this entails
237 - saving c and d on the stack
238 - making a continuation info table
239 - at the continuation, loading c and d off the stack into regs
240 - finally, call r
241
242 Note that there is only a fixed set of patterns for the leftover arguments
243 (pp, ppp, ..., as in the slow call patterns), so the corresponding
244 continuations can be pre-compiled once and for all (they live in the RTS,
245 e.g. stg_ap_pp_info), rather than generating a fresh one per call site.
246
247 Not only does this generate much less code, it is faster too. We will
248 generate something like:
249
250 Sp[old+16] = c
251 Sp[old+24] = d
252 Sp[old+32] = stg_ap_pp_info
253 call f(a,b) -- usual calling convention
254
255 For the purposes of the CmmCall node, we count this extra stack as
256 just more arguments that we are passing on the stack (cml_args).
257 -}
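-- To make the splitting in direct_call concrete: for a call  f a b c d
-- where f has arity 2 and all arguments are pointers (reps = [P,P,P,P]),
--   initial_reps = [P,P]      rest_reps = [P,P]
--   fast_args    = [a,b]      rest_args = [c,d]
--   stack_args   = slowArgs [(P,c),(P,d)]
--                = [(N, stg_ap_pp_info), (P,c), (P,d)]    -- profiling off
-- so a and b are passed in the normal calling convention, while the
-- stg_ap_pp continuation and c, d are pushed via mkStkOffsets, exactly as
-- in the Note above.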
258
259 -- | 'slowArgs' takes a list of function arguments and prepares them for
260 -- pushing on the stack as the "extra" arguments of an over-saturated
261 -- call, i.e. the arguments beyond the arity of the function being called.
262 slowArgs :: [(ArgRep,CmmExpr)] -> [(ArgRep,CmmExpr)]
263 slowArgs [] = []
264 slowArgs amodes
265 | opt_SccProfilingOn = save_cccs ++ this_pat ++ slowArgs rest
266 | otherwise = this_pat ++ slowArgs rest
267 where
268 (arg_pat, args, rest) = matchSlowPattern amodes
269 stg_ap_pat = mkCmmRetInfoLabel rtsPackageId arg_pat
270 this_pat = (N, mkLblExpr stg_ap_pat) : args
271 save_cccs = [(N, mkLblExpr save_cccs_lbl), (N, curCCS)]
272 save_cccs_lbl = mkCmmRetInfoLabel rtsPackageId (fsLit "stg_restore_cccs")
273
274 matchSlowPattern :: [(ArgRep,CmmExpr)]
275 -> (FastString, [(ArgRep,CmmExpr)], [(ArgRep,CmmExpr)])
276 matchSlowPattern amodes = (arg_pat, these, rest)
277 where (arg_pat, n) = slowCallPattern (map fst amodes)
278 (these, rest) = splitAt n amodes
279
280 -- These cases were found to cover about 99% of all slow calls:
281 slowCallPattern :: [ArgRep] -> (FastString, Arity)
282 -- Returns the generic apply function and arity
283 slowCallPattern (P: P: P: P: P: P: _) = (fsLit "stg_ap_pppppp", 6)
284 slowCallPattern (P: P: P: P: P: _) = (fsLit "stg_ap_ppppp", 5)
285 slowCallPattern (P: P: P: P: _) = (fsLit "stg_ap_pppp", 4)
286 slowCallPattern (P: P: P: V: _) = (fsLit "stg_ap_pppv", 4)
287 slowCallPattern (P: P: P: _) = (fsLit "stg_ap_ppp", 3)
288 slowCallPattern (P: P: V: _) = (fsLit "stg_ap_ppv", 3)
289 slowCallPattern (P: P: _) = (fsLit "stg_ap_pp", 2)
290 slowCallPattern (P: V: _) = (fsLit "stg_ap_pv", 2)
291 slowCallPattern (P: _) = (fsLit "stg_ap_p", 1)
292 slowCallPattern (V: _) = (fsLit "stg_ap_v", 1)
293 slowCallPattern (N: _) = (fsLit "stg_ap_n", 1)
294 slowCallPattern (F: _) = (fsLit "stg_ap_f", 1)
295 slowCallPattern (D: _) = (fsLit "stg_ap_d", 1)
296 slowCallPattern (L: _) = (fsLit "stg_ap_l", 1)
297 slowCallPattern [] = (fsLit "stg_ap_0", 0)
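-- A few example matches (the first matching prefix of reps wins):
--   slowCallPattern [P,P,N]   == (fsLit "stg_ap_pp",   2)   -- third arg left for later
--   slowCallPattern [N,P,P]   == (fsLit "stg_ap_n",    1)
--   slowCallPattern [P,P,P,V] == (fsLit "stg_ap_pppv", 4)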
298
299
300 -------------------------------------------------------------------------
301 -- Fix the byte-offsets of a bunch of things to push on the stack
302
303 -- This is used for pushing slow-call continuations.
304 -- See Note [over-saturated calls].
305
306 mkStkOffsets
307 :: [(ArgRep,CmmExpr)] -- things to make offsets for
308 -> ( ByteOff -- OUTPUTS: Byte offset of the topmost allocated word
309 , [(CmmExpr, ByteOff)] ) -- things with offsets (voids filtered out)
310 mkStkOffsets things
311 = loop 0 [] (reverse things)
312 where
313 loop offset offs [] = (offset,offs)
314 loop offset offs ((V,_):things) = loop offset offs things
315 -- ignore Void arguments
316 loop offset offs ((rep,thing):things)
317 = loop thing_off ((thing, thing_off):offs) things
318 where
319 thing_off = offset + argRepSizeW rep * wORD_SIZE
320 -- offset of thing is offset+size, because we're
321 -- growing the stack *downwards* as the offsets increase.
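-- For instance, continuing the example from Note [over-saturated calls]
-- and assuming an 8-byte word:
--   mkStkOffsets [(N, stg_ap_pp_info), (P,c), (P,d)]
--     == (24, [(stg_ap_pp_info, 24), (c, 16), (d, 8)])
-- i.e. the *last* thing in the input list ends up nearest the stack top
-- (smallest offset), and 24 is the byte offset of the topmost allocated word.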
322
323
324 -------------------------------------------------------------------------
325 -- Classifying arguments: ArgRep
326 -------------------------------------------------------------------------
327
328 -- ArgRep is not exported (even abstractly)
329 -- It's a local helper type for classification
330
331 data ArgRep = P -- GC Ptr
332 | N -- One-word non-ptr
333 | L -- Two-word non-ptr (long)
334 | V -- Void
335 | F -- Float
336 | D -- Double
337 instance Outputable ArgRep where
338 ppr P = text "P"
339 ppr N = text "N"
340 ppr L = text "L"
341 ppr V = text "V"
342 ppr F = text "F"
343 ppr D = text "D"
344
345 toArgRep :: PrimRep -> ArgRep
346 toArgRep VoidRep = V
347 toArgRep PtrRep = P
348 toArgRep IntRep = N
349 toArgRep WordRep = N
350 toArgRep AddrRep = N
351 toArgRep Int64Rep = L
352 toArgRep Word64Rep = L
353 toArgRep FloatRep = F
354 toArgRep DoubleRep = D
355
356 isNonV :: ArgRep -> Bool
357 isNonV V = False
358 isNonV _ = True
359
360 argsReps :: [StgArg] -> [ArgRep]
361 argsReps = map (toArgRep . argPrimRep)
362
363 argRepSizeW :: ArgRep -> WordOff -- Size in words
364 argRepSizeW N = 1
365 argRepSizeW P = 1
366 argRepSizeW F = 1
367 argRepSizeW L = wORD64_SIZE `quot` wORD_SIZE
368 argRepSizeW D = dOUBLE_SIZE `quot` wORD_SIZE
369 argRepSizeW V = 0
370
371 idArgRep :: Id -> ArgRep
372 idArgRep = toArgRep . idPrimRep
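-- For illustration: for arguments of type Int#, Double#, State# RealWorld
-- and Char (in that order) we get
--   argsReps args == [N, D, V, P]
-- Note that the V *is* kept in the reps, even though getNonVoidArgAmodes
-- drops the corresponding argument (this is NB1 in direct_call above).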
373
374 -------------------------------------------------------------------------
375 ---- Laying out objects on the heap and stack
376 -------------------------------------------------------------------------
377
378 -- The heap always grows upwards, so hpRel is easy
379 hpRel :: VirtualHpOffset -- virtual offset of Hp
380 -> VirtualHpOffset -- virtual offset of The Thing
381 -> WordOff -- integer word offset
382 hpRel hp off = off - hp
383
384 getHpRelOffset :: VirtualHpOffset -> FCode CmmExpr
385 getHpRelOffset virtual_offset
386 = do { hp_usg <- getHpUsage
387 ; return (cmmRegOffW hpReg (hpRel (realHp hp_usg) virtual_offset)) }
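-- E.g. if realHp is 26 and we ask for virtual offset 22, then
-- hpRel 26 22 == -4, so getHpRelOffset 22 produces the Cmm expression
-- Hp + (-4)*wORD_SIZE, i.e. an address four words below the real Hp.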
388
389 mkVirtHeapOffsets
390 :: Bool -- True <=> is a thunk
391 -> [(PrimRep,a)] -- Things to make offsets for
392 -> (WordOff, -- _Total_ number of words allocated
393 WordOff, -- Number of words allocated for *pointers*
394 [(NonVoid a, VirtualHpOffset)])
395
396 -- Things with their offsets from start of object in order of
397 -- increasing offset; BUT THIS MAY BE DIFFERENT TO INPUT ORDER
398 -- First in list gets the lowest offset, just after the object header.
399 --
400 -- Void arguments are removed, so output list may be shorter than
401 -- input list
402 --
403 -- mkVirtHeapOffsets always returns boxed things with smaller offsets
404 -- than the unboxed things
405
406 mkVirtHeapOffsets is_thunk things
407 = let non_void_things = filterOut (isVoidRep . fst) things
408 (ptrs, non_ptrs) = partition (isGcPtrRep . fst) non_void_things
409 (wds_of_ptrs, ptrs_w_offsets) = mapAccumL computeOffset 0 ptrs
410 (tot_wds, non_ptrs_w_offsets) = mapAccumL computeOffset wds_of_ptrs non_ptrs
411 in
412 (tot_wds, wds_of_ptrs, ptrs_w_offsets ++ non_ptrs_w_offsets)
413 where
414 hdr_size | is_thunk = thunkHdrSize
415 | otherwise = fixedHdrSize
416
417 computeOffset wds_so_far (rep, thing)
418 = (wds_so_far + argRepSizeW (toArgRep rep),
419 (NonVoid thing, hdr_size + wds_so_far))
420
421 mkVirtConstrOffsets :: [(PrimRep,a)] -> (WordOff, WordOff, [(NonVoid a, VirtualHpOffset)])
422 -- Just like mkVirtHeapOffsets, but for constructors
423 mkVirtConstrOffsets = mkVirtHeapOffsets False
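-- A worked example for a (non-thunk) constructor with field reps
-- [(IntRep,i), (PtrRep,p), (VoidRep,v), (DoubleRep,d)]  (names illustrative),
-- on a target where a Double fits in one word:
--   mkVirtConstrOffsets [(IntRep,i),(PtrRep,p),(VoidRep,v),(DoubleRep,d)]
--     == (3, 1, [ (p, hdr), (i, hdr+1), (d, hdr+2) ])    where hdr = fixedHdrSize
-- i.e. 3 payload words in total, 1 of them a pointer, the pointer field
-- first, and the void field dropped.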
424
425
426 -------------------------------------------------------------------------
427 --
428 -- Making argument descriptors
429 --
430 -- An argument descriptor describes the layout of args on the stack,
431 -- both for * GC (stack-layout) purposes, and
432 -- * saving/restoring registers when a heap-check fails
433 --
434 -- Void arguments aren't important here, so we filter them out (contrast constructSlowCall)
435 --
436 -------------------------------------------------------------------------
437
438 -- bring in ARG_P, ARG_N, etc.
439 #include "../includes/rts/storage/FunTypes.h"
440
441 mkArgDescr :: Name -> [Id] -> FCode ArgDescr
442 mkArgDescr _nm args
443 = case stdPattern arg_reps of
444 Just spec_id -> return (ArgSpec spec_id)
445 Nothing -> return (ArgGen arg_bits)
446 where
447 arg_bits = argBits arg_reps
448 arg_reps = filter isNonV (map idArgRep args)
449 -- Getting rid of voids eases matching of standard patterns
450
451 argBits :: [ArgRep] -> [Bool] -- True for non-ptr, False for ptr
452 argBits [] = []
453 argBits (P : args) = False : argBits args
454 argBits (arg : args) = take (argRepSizeW arg) (repeat True) ++ argBits args
455
456 ----------------------
457 stdPattern :: [ArgRep] -> Maybe StgHalfWord
458 stdPattern reps
459 = case reps of
460 [] -> Just ARG_NONE -- just void args, probably
461 [N] -> Just ARG_N
462 [P] -> Just ARG_P
463 [F] -> Just ARG_F
464 [D] -> Just ARG_D
465 [L] -> Just ARG_L
466
467 [N,N] -> Just ARG_NN
468 [N,P] -> Just ARG_NP
469 [P,N] -> Just ARG_PN
470 [P,P] -> Just ARG_PP
471
472 [N,N,N] -> Just ARG_NNN
473 [N,N,P] -> Just ARG_NNP
474 [N,P,N] -> Just ARG_NPN
475 [N,P,P] -> Just ARG_NPP
476 [P,N,N] -> Just ARG_PNN
477 [P,N,P] -> Just ARG_PNP
478 [P,P,N] -> Just ARG_PPN
479 [P,P,P] -> Just ARG_PPP
480
481 [P,P,P,P] -> Just ARG_PPPP
482 [P,P,P,P,P] -> Just ARG_PPPPP
483 [P,P,P,P,P,P] -> Just ARG_PPPPPP
484
485 _ -> Nothing
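-- Two examples of the above (64-bit word size assumed for argBits):
--   * a function taking an Int# and a boxed argument has arg_reps [N,P],
--     which is a standard pattern, so we get  ArgSpec ARG_NP
--   * a function taking a boxed argument and a Double# has arg_reps [P,D];
--     there is no ARG_PD, so we fall back to  ArgGen (argBits [P,D])
--     == ArgGen [False, True]     -- a pointer slot, then one non-pointer word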
486
487 -------------------------------------------------------------------------
488 --
489 -- Generating the info table and code for a closure
490 --
491 -------------------------------------------------------------------------
492
493 -- Here we make an info table of type 'CmmInfo'. The concrete
494 -- representation as a list of 'CmmAddr' is handled later
495 -- in the pipeline by 'cmmToRawCmm'.
496 -- When loading the free variables, a function closure pointer may be tagged,
497 -- so we must take it into account.
498
499 emitClosureProcAndInfoTable :: Bool -- top-level?
500 -> Id -- name of the closure
501 -> LambdaFormInfo
502 -> CmmInfoTable
503 -> [NonVoid Id] -- incoming arguments
504 -> ((Int, LocalReg, [LocalReg]) -> FCode ()) -- function body
505 -> FCode ()
506 emitClosureProcAndInfoTable top_lvl bndr lf_info info_tbl args body
507 = do {
508 -- Bind the binder itself, but only if it's not a top-level
509 -- binding. We need non-top let-bindings to refer to the
510 -- top-level binding, which this binding would incorrectly shadow.
511 ; node <- if top_lvl then return $ idToReg (NonVoid bndr)
512 else bindToReg (NonVoid bndr) lf_info
513 ; let node_points = nodeMustPointToIt lf_info
514 ; arg_regs <- bindArgsToRegs args
515 ; let args' = if node_points then (node : arg_regs) else arg_regs
516 conv = if nodeMustPointToIt lf_info then NativeNodeCall
517 else NativeDirectCall
518 (offset, _) = mkCallEntry conv args'
519 ; emitClosureAndInfoTable info_tbl conv args' $ body (offset, node, arg_regs)
520 }
521
522 -- Data constructors need closures, but not with all the argument handling
523 -- needed for functions. The shared part goes here.
524 emitClosureAndInfoTable ::
525 CmmInfoTable -> Convention -> [LocalReg] -> FCode () -> FCode ()
526 emitClosureAndInfoTable info_tbl conv args body
527 = do { dflags <- getDynFlags
528 ; blks <- getCode body
529 ; let entry_lbl = toEntryLbl (targetPlatform dflags) (cit_lbl info_tbl)
530 ; emitProcWithConvention conv info_tbl entry_lbl args blks
531 }
532
533 -----------------------------------------------------------------------------
534 --
535 -- Info table offsets
536 --
537 -----------------------------------------------------------------------------
538
539 stdInfoTableSizeW :: WordOff
540 -- The size of a standard info table varies with profiling/ticky etc,
541 -- so we can't get it from Constants
542 -- It must vary in sync with mkStdInfoTable
543 stdInfoTableSizeW
544 = size_fixed + size_prof
545 where
546 size_fixed = 2 -- layout, type
547 size_prof | opt_SccProfilingOn = 2
548 | otherwise = 0
549
550 stdInfoTableSizeB :: ByteOff
551 stdInfoTableSizeB = stdInfoTableSizeW * wORD_SIZE :: ByteOff
552
553 stdSrtBitmapOffset :: ByteOff
554 -- Byte offset of the SRT bitmap half-word which is
555 -- in the *higher-addressed* part of the type_lit
556 stdSrtBitmapOffset = stdInfoTableSizeB - hALF_WORD_SIZE
557
558 stdClosureTypeOffset :: ByteOff
559 -- Byte offset of the closure type half-word
560 stdClosureTypeOffset = stdInfoTableSizeB - wORD_SIZE
561
562 stdPtrsOffset, stdNonPtrsOffset :: ByteOff
563 stdPtrsOffset = stdInfoTableSizeB - 2*wORD_SIZE
564 stdNonPtrsOffset = stdInfoTableSizeB - 2*wORD_SIZE + hALF_WORD_SIZE
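-- Putting concrete numbers on these: with profiling off on a 64-bit target
-- (wORD_SIZE = 8, hALF_WORD_SIZE = 4) we have stdInfoTableSizeW = 2, so
--   stdInfoTableSizeB     = 16
--   stdPtrsOffset         = 0      stdNonPtrsOffset   = 4
--   stdClosureTypeOffset  = 8      stdSrtBitmapOffset = 12
-- all measured from the start of the standard info table as returned by
-- 'infoTable' below (i.e. from info_ptr - 16 when tables are next to code).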
565
566 -------------------------------------------------------------------------
567 --
568 -- Accessing fields of an info table
569 --
570 -------------------------------------------------------------------------
571
572 closureInfoPtr :: CmmExpr -> CmmExpr
573 -- Takes a closure pointer and returns the info table pointer
574 closureInfoPtr e = CmmLoad e bWord
575
576 entryCode :: CmmExpr -> CmmExpr
577 -- Takes an info pointer (the first word of a closure)
578 -- and returns its entry code
579 entryCode e | tablesNextToCode = e
580 | otherwise = CmmLoad e bWord
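-- So, for example, the code to enter the closure held in some register r can
-- be obtained (as a CmmExpr) with  entryCode (closureInfoPtr (CmmReg r)):
-- one load to fetch the info pointer, and a second load only when info
-- tables are not next to the code.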
581
582 getConstrTag :: CmmExpr -> CmmExpr
583 -- Takes a closure pointer, and returns the *zero-indexed*
584 -- constructor tag obtained from the info table
585 -- This lives in the SRT field of the info table
586 -- (constructors don't need SRTs).
587 getConstrTag closure_ptr
588 = CmmMachOp (MO_UU_Conv halfWordWidth wordWidth) [infoTableConstrTag info_table]
589 where
590 info_table = infoTable (closureInfoPtr closure_ptr)
591
592 cmmGetClosureType :: CmmExpr -> CmmExpr
593 -- Takes a closure pointer, and returns the closure type
594 -- obtained from the info table
595 cmmGetClosureType closure_ptr
596 = CmmMachOp (MO_UU_Conv halfWordWidth wordWidth) [infoTableClosureType info_table]
597 where
598 info_table = infoTable (closureInfoPtr closure_ptr)
599
600 infoTable :: CmmExpr -> CmmExpr
601 -- Takes an info pointer (the first word of a closure)
602 -- and returns a pointer to the first word of the standard-form
603 -- info table, excluding the entry-code word (if present)
604 infoTable info_ptr
605 | tablesNextToCode = cmmOffsetB info_ptr (- stdInfoTableSizeB)
606 | otherwise = cmmOffsetW info_ptr 1 -- Past the entry code pointer
607
608 infoTableConstrTag :: CmmExpr -> CmmExpr
609 -- Takes an info table pointer (from infoTable) and returns the constr tag
610 -- field of the info table (same as the srt_bitmap field)
611 infoTableConstrTag = infoTableSrtBitmap
612
613 infoTableSrtBitmap :: CmmExpr -> CmmExpr
614 -- Takes an info table pointer (from infoTable) and returns the srt_bitmap
615 -- field of the info table
616 infoTableSrtBitmap info_tbl
617 = CmmLoad (cmmOffsetB info_tbl stdSrtBitmapOffset) bHalfWord
618
619 infoTableClosureType :: CmmExpr -> CmmExpr
620 -- Takes an info table pointer (from infoTable) and returns the closure type
621 -- field of the info table.
622 infoTableClosureType info_tbl
623 = CmmLoad (cmmOffsetB info_tbl stdClosureTypeOffset) bHalfWord
624
625 infoTablePtrs :: CmmExpr -> CmmExpr
626 infoTablePtrs info_tbl
627 = CmmLoad (cmmOffsetB info_tbl stdPtrsOffset) bHalfWord
628
629 infoTableNonPtrs :: CmmExpr -> CmmExpr
630 infoTableNonPtrs info_tbl
631 = CmmLoad (cmmOffsetB info_tbl stdNonPtrsOffset) bHalfWord
632
633 funInfoTable :: CmmExpr -> CmmExpr
634 -- Takes the info pointer of a function,
635 -- and returns a pointer to the first word of the StgFunInfoExtra struct
636 -- in the info table.
637 funInfoTable info_ptr
638 | tablesNextToCode
639 = cmmOffsetB info_ptr (- stdInfoTableSizeB - sIZEOF_StgFunInfoExtraRev)
640 | otherwise
641 = cmmOffsetW info_ptr (1 + stdInfoTableSizeW)
642 -- Past the entry code pointer
643