1 -- -----------------------------------------------------------------------------
3 -- (c) The University of Glasgow 1993-2004
5 -- This is the top-level module in the native code generator.
7 -- -----------------------------------------------------------------------------
10 module AsmCodeGen ( nativeCodeGen ) where
12 #include "HsVersions.h"
13 #include "nativeGen/NCG.h"
16 import qualified X86.CodeGen
17 import qualified X86.Regs
18 import qualified X86.Instr
19 import qualified X86.Ppr
21 import qualified SPARC.CodeGen
22 import qualified SPARC.Regs
23 import qualified SPARC.Instr
24 import qualified SPARC.Ppr
25 import qualified SPARC.ShortcutJump
26 import qualified SPARC.CodeGen.Expand
28 import qualified PPC.CodeGen
29 import qualified PPC.Cond
30 import qualified PPC.Regs
31 import qualified PPC.RegInfo
32 import qualified PPC.Instr
33 import qualified PPC.Ppr
35 import RegAlloc.Liveness
36 import qualified RegAlloc.Linear.Main as Linear
38 import qualified GraphColor as Color
39 import qualified RegAlloc.Graph.Main as Color
40 import qualified RegAlloc.Graph.Stats as Color
41 import qualified RegAlloc.Graph.TrivColorable as Color
52 import CgUtils ( fixStgRegisters )
54 import CmmOpt ( cmmEliminateDeadBlocks, cmmMiniInline, cmmMachOpFold )
59 import Unique ( Unique, getUnique )
65 import BasicTypes ( Alignment )
67 import qualified Pretty
84 The native-code generator has machine-independent and
85 machine-dependent modules.
87 This module ("AsmCodeGen") is the top-level machine-independent
88 module. Before entering machine-dependent land, we do some
89 machine-independent optimisations (defined below) on the
92 We convert to the machine-specific 'Instr' datatype with
93 'cmmCodeGen', assuming an infinite supply of registers. We then use
94 a machine-independent register allocator ('regAlloc') to rejoin
95 reality. Obviously, 'regAlloc' has machine-specific helper
96 functions (see about "RegAllocInfo" below).
98 Finally, we order the basic blocks of the function so as to minimise
99 the number of jumps between blocks, by utilising fallthrough wherever
102 The machine-dependent bits break down as follows:
104 * ["MachRegs"] Everything about the target platform's machine
105 registers (and immediate operands, and addresses, which tend to
106 intermingle/interact with registers).
108 * ["MachInstrs"] Includes the 'Instr' datatype (possibly should
109 have a module of its own), plus a miscellany of other things
110 (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)
112 * ["MachCodeGen"] is where 'Cmm' stuff turns into
113 machine instructions.
115 * ["PprMach"] 'pprInstr' turns an 'Instr' into text (well, really
118 * ["RegAllocInfo"] In the register allocator, we manipulate
119 'MRegsState's, which are 'BitSet's, one bit per machine register.
120 When we want to say something about a specific machine register
121 (e.g., ``it gets clobbered by this instruction''), we set/unset
122 its bit. Obviously, we do this 'BitSet' thing for efficiency
125 The 'RegAllocInfo' module collects together the machine-specific
126 info needed to do register allocation.
128 * ["RegisterAlloc"] The (machine-independent) register allocator.
131 -- -----------------------------------------------------------------------------
132 -- Top-level of the native codegen
-- | Bundle of architecture-specific hooks used by the machine-independent
-- driver.  Each supported target (x86, PPC, SPARC) builds one of these in
-- 'nativeCodeGen' below; the ncg* passes are 'id' on architectures that do
-- not need them.
134 data NcgImpl statics instr jumpDest = NcgImpl {
135 cmmTopCodeGen :: RawCmmDecl -> NatM [NatCmmDecl statics instr],
-- jump-table data section for this instruction, if it needs one
136 generateJumpTableForInstr :: instr -> Maybe (NatCmmDecl statics instr),
137 getJumpDestBlockId :: jumpDest -> Maybe BlockId,
-- Just dest when the instruction is a jump that branch-shortcutting may elide
138 canShortcut :: instr -> Maybe jumpDest,
139 shortcutStatics :: (BlockId -> Maybe jumpDest) -> statics -> statics,
140 shortcutJump :: (BlockId -> Maybe jumpDest) -> instr -> instr,
141 pprNatCmmDecl :: Platform -> NatCmmDecl statics instr -> SDoc,
142 maxSpillSlots :: Int,
143 allocatableRegs :: [RealReg],
-- 32-bit x86 only: insert ffree instrs to clear the FPU stack ('id' elsewhere)
144 ncg_x86fp_kludge :: [NatCmmDecl statics instr] -> [NatCmmDecl statics instr],
-- SPARC only: expand synthetic instructions ('id' elsewhere)
145 ncgExpandTop :: [NatCmmDecl statics instr] -> [NatCmmDecl statics instr],
-- PPC only: rewrite out-of-range conditional branches ('id' elsewhere)
146 ncgMakeFarBranches :: [NatBasicBlock instr] -> [NatBasicBlock instr]
-- | Top-level entry point: select the 'NcgImpl' matching the target
-- architecture and hand off to the generic driver 'nativeCodeGen''.
-- Architectures without a native backend (ARM, PPC64, unknown) panic here.
150 nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmmGroup] -> IO ()
151 nativeCodeGen dflags h us cmms
152 = let platform = targetPlatform dflags
153 nCG' :: (Outputable statics, Outputable instr, Instruction instr) => NcgImpl statics instr jumpDest -> IO ()
154 nCG' ncgImpl = nativeCodeGen' dflags ncgImpl h us cmms
-- shared between 32- and 64-bit x86; only ArchX86 overrides the fp kludge
155 x86NcgImpl = NcgImpl {
156 cmmTopCodeGen = X86.CodeGen.cmmTopCodeGen
157 ,generateJumpTableForInstr = X86.CodeGen.generateJumpTableForInstr
158 ,getJumpDestBlockId = X86.Instr.getJumpDestBlockId
159 ,canShortcut = X86.Instr.canShortcut
160 ,shortcutStatics = X86.Instr.shortcutStatics
161 ,shortcutJump = X86.Instr.shortcutJump
162 ,pprNatCmmDecl = X86.Ppr.pprNatCmmDecl
163 ,maxSpillSlots = X86.Instr.maxSpillSlots (target32Bit platform)
164 ,allocatableRegs = X86.Regs.allocatableRegs
165 ,ncg_x86fp_kludge = id
167 ,ncgMakeFarBranches = id
169 in case platformArch platform of
-- the x87 FPU-stack kludge is only enabled on 32-bit x86
170 ArchX86 -> nCG' (x86NcgImpl { ncg_x86fp_kludge = map x86fp_kludge })
171 ArchX86_64 -> nCG' x86NcgImpl
174 cmmTopCodeGen = PPC.CodeGen.cmmTopCodeGen
175 ,generateJumpTableForInstr = PPC.CodeGen.generateJumpTableForInstr
176 ,getJumpDestBlockId = PPC.RegInfo.getJumpDestBlockId
177 ,canShortcut = PPC.RegInfo.canShortcut
178 ,shortcutStatics = PPC.RegInfo.shortcutStatics
179 ,shortcutJump = PPC.RegInfo.shortcutJump
180 ,pprNatCmmDecl = PPC.Ppr.pprNatCmmDecl
181 ,maxSpillSlots = PPC.Instr.maxSpillSlots
182 ,allocatableRegs = PPC.Regs.allocatableRegs
183 ,ncg_x86fp_kludge = id
-- PPC conditional branches have limited reach, so far branches get rewritten
185 ,ncgMakeFarBranches = makeFarBranches
189 cmmTopCodeGen = SPARC.CodeGen.cmmTopCodeGen
190 ,generateJumpTableForInstr = SPARC.CodeGen.generateJumpTableForInstr
191 ,getJumpDestBlockId = SPARC.ShortcutJump.getJumpDestBlockId
192 ,canShortcut = SPARC.ShortcutJump.canShortcut
193 ,shortcutStatics = SPARC.ShortcutJump.shortcutStatics
194 ,shortcutJump = SPARC.ShortcutJump.shortcutJump
195 ,pprNatCmmDecl = SPARC.Ppr.pprNatCmmDecl
196 ,maxSpillSlots = SPARC.Instr.maxSpillSlots
197 ,allocatableRegs = SPARC.Regs.allocatableRegs
198 ,ncg_x86fp_kludge = id
-- SPARC synthetic instructions must be expanded into real ones
199 ,ncgExpandTop = map SPARC.CodeGen.Expand.expandTop
200 ,ncgMakeFarBranches = id
203 panic "nativeCodeGen: No NCG for ARM"
205 panic "nativeCodeGen: No NCG for PPC 64"
207 panic "nativeCodeGen: No NCG for unknown arch"
-- | Generic driver: run code generation over every Cmm group, dump the
-- requested debug output and register-allocator statistics, and finally
-- write the accumulated import declarations to the output handle.
209 nativeCodeGen' :: (Outputable statics, Outputable instr, Instruction instr)
211 -> NcgImpl statics instr jumpDest
212 -> Handle -> UniqSupply -> [RawCmmGroup] -> IO ()
213 nativeCodeGen' dflags ncgImpl h us cmms
215 let platform = targetPlatform dflags
216 split_cmms = concat $ map add_split cmms
217 -- BufHandle is a performance hack. We could hide it inside
218 -- Pretty if it weren't for the fact that we do lots of little
219 -- printDocs here (in order to do codegen in constant space).
220 bufh <- newBufHandle h
221 (imports, prof) <- cmmNativeGens dflags ncgImpl bufh us split_cmms [] [] 0
224 let (native, colorStats, linearStats)
229 Opt_D_dump_asm "Asm code"
230 (vcat $ map (pprNatCmmDecl ncgImpl platform) $ concat native)
232 -- dump global NCG stats for graph coloring allocator
233 (case concat $ catMaybes colorStats of
236 -- build the global register conflict graph
238 = foldl Color.union Color.initGraph
239 $ [ Color.raGraph stat
240 | stat@Color.RegAllocStatsStart{} <- stats]
242 dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
243 $ Color.pprStats stats graphGlobal
246 Opt_D_dump_asm_conflicts "Register conflict graph"
248 (targetRegDotColor platform)
249 (Color.trivColorable platform
250 (targetVirtualRegSqueeze platform)
251 (targetRealRegSqueeze platform))
255 -- dump global NCG stats for linear allocator
256 (case concat $ catMaybes linearStats of
258 stats -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
259 $ Linear.pprStats (concat native) stats)
261 -- write out the imports
262 Pretty.printDoc Pretty.LeftMode (pprCols dflags) h
263 $ withPprStyleDoc dflags (mkCodeStyle AsmStyle)
264 $ makeImportsDoc dflags (concat imports)
-- with -split-objs, prefix each proc with a split-marker proc
-- (presumably so downstream tooling can split the assembly -- TODO confirm)
269 | dopt Opt_SplitObjs dflags = split_marker : tops
272 split_marker = CmmProc Nothing mkSplitMarkerLabel (ListGraph [])
275 -- | Do native code generation on all these cmms.
-- Accumulates the per-chunk import lists and allocator stats in reverse
-- (impAcc / profAcc) and reverses both on the way out.
277 cmmNativeGens :: (Outputable statics, Outputable instr, Instruction instr)
279 -> NcgImpl statics instr jumpDest
284 -> [ ([NatCmmDecl statics instr],
285 Maybe [Color.RegAllocStats statics instr],
286 Maybe [Linear.RegAllocStats]) ]
289 [([NatCmmDecl statics instr],
290 Maybe [Color.RegAllocStats statics instr],
291 Maybe [Linear.RegAllocStats])] )
293 cmmNativeGens _ _ _ _ [] impAcc profAcc _
294 = return (reverse impAcc, reverse profAcc)
296 cmmNativeGens dflags ncgImpl h us (cmm : cmms) impAcc profAcc count
298 let platform = targetPlatform dflags
300 (us', native, imports, colorStats, linearStats)
301 <- {-# SCC "cmmNativeGen" #-} cmmNativeGen dflags ncgImpl us cmm count
-- render straight into the buffered handle, chunk by chunk, so the
-- whole program's assembly is never held in memory at once
303 {-# SCC "pprNativeCode" #-} Pretty.bufLeftRender h
304 $ withPprStyleDoc dflags (mkCodeStyle AsmStyle)
305 $ vcat $ map (pprNatCmmDecl ncgImpl platform) native
307 -- carefully evaluate this strictly. Binding it with 'let'
308 -- and then using 'seq' doesn't work, because the let
309 -- apparently gets inlined first.
310 lsPprNative <- return $!
311 if dopt Opt_D_dump_asm dflags
312 || dopt Opt_D_dump_asm_stats dflags
316 count' <- return $! count + 1;
318 -- force evaluation of all this stuff to avoid space leaks
319 {-# SCC "seqString" #-} seqString (showSDoc dflags $ vcat $ map ppr imports) `seq` return ()
321 cmmNativeGens dflags ncgImpl
324 ((lsPprNative, colorStats, linearStats) : profAcc)
-- fully evaluate a String (not just WHNF)
327 where seqString [] = ()
328 seqString (x:xs) = x `seq` seqString xs `seq` ()
331 -- | Complete native code generation phase for a single top-level chunk of Cmm.
332 -- Dumping the output of each stage along the way.
333 -- Global conflict graph and NCG stats
335 :: (Outputable statics, Outputable instr, Instruction instr)
337 -> NcgImpl statics instr jumpDest
339 -> RawCmmDecl -- ^ the cmm to generate code for
340 -> Int -- ^ sequence number of this top thing
342 , [NatCmmDecl statics instr] -- native code
343 , [CLabel] -- things imported by this cmm
344 , Maybe [Color.RegAllocStats statics instr] -- stats for the coloring register allocator
345 , Maybe [Linear.RegAllocStats]) -- stats for the linear register allocators
347 cmmNativeGen dflags ncgImpl us cmm count
349 let platform = targetPlatform dflags
351 -- rewrite assignments to global regs
353 {-# SCC "fixStgRegisters" #-}
356 -- cmm to cmm optimisations
357 let (opt_cmm, imports) =
358 {-# SCC "cmmToCmm" #-}
359 cmmToCmm dflags fixed_cmm
362 Opt_D_dump_opt_cmm "Optimised Cmm"
363 (pprCmmGroup platform [opt_cmm])
365 -- generate native code from cmm
366 let ((native, lastMinuteImports), usGen) =
367 {-# SCC "genMachCode" #-}
368 initUs us $ genMachCode dflags (cmmTopCodeGen ncgImpl) opt_cmm
371 Opt_D_dump_asm_native "Native code"
372 (vcat $ map (pprNatCmmDecl ncgImpl platform) native)
374 -- tag instructions with register liveness information
375 let (withLiveness, usLive) =
376 {-# SCC "regLiveness" #-}
379 $ map natCmmTopToLive native
382 Opt_D_dump_asm_liveness "Liveness annotations added"
383 (vcat $ map ppr withLiveness)
385 -- allocate registers
-- graph-colouring allocator when Opt_RegsGraph / Opt_RegsIterative is set,
-- otherwise the linear allocator
386 (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
387 if ( dopt Opt_RegsGraph dflags
388 || dopt Opt_RegsIterative dflags)
390 -- the regs usable for allocation
391 let (alloc_regs :: UniqFM (UniqSet RealReg))
392 = foldr (\r -> plusUFM_C unionUniqSets
393 $ unitUFM (targetClassOfRealReg platform r) (unitUniqSet r))
395 $ allocatableRegs ncgImpl
397 -- do the graph coloring register allocation
398 let ((alloced, regAllocStats), usAlloc)
399 = {-# SCC "RegAlloc" #-}
404 (mkUniqSet [0 .. maxSpillSlots ncgImpl])
407 -- dump out what happened during register allocation
409 Opt_D_dump_asm_regalloc "Registers allocated"
410 (vcat $ map (pprNatCmmDecl ncgImpl platform) alloced)
413 Opt_D_dump_asm_regalloc_stages "Build/spill stages"
414 (vcat $ map (\(stage, stats)
415 -> text "# --------------------------"
416 $$ text "# cmm " <> int count <> text " Stage " <> int stage
418 $ zip [0..] regAllocStats)
-- only keep the stats when they will actually be dumped
421 if dopt Opt_D_dump_asm_stats dflags
422 then Just regAllocStats else Nothing
424 -- force evaluation of the Maybe to avoid space leak
425 mPprStats `seq` return ()
427 return ( alloced, usAlloc
432 -- do linear register allocation
433 let ((alloced, regAllocStats), usAlloc)
434 = {-# SCC "RegAlloc" #-}
437 $ mapM (Linear.regAlloc dflags) withLiveness
440 Opt_D_dump_asm_regalloc "Registers allocated"
441 (vcat $ map (pprNatCmmDecl ncgImpl platform) alloced)
444 if dopt Opt_D_dump_asm_stats dflags
445 then Just (catMaybes regAllocStats) else Nothing
447 -- force evaluation of the Maybe to avoid space leak
448 mPprStats `seq` return ()
450 return ( alloced, usAlloc
454 ---- x86fp_kludge. This pass inserts ffree instructions to clear
455 ---- the FPU stack on x86. The x86 ABI requires that the FPU stack
456 ---- is clear, and library functions can return odd results if it
459 ---- NB. must happen before shortcutBranches, because that
460 ---- generates JXX_GBLs which we can't fix up in x86fp_kludge.
461 let kludged = {-# SCC "x86fp_kludge" #-} ncg_x86fp_kludge ncgImpl alloced
463 ---- generate jump tables
465 {-# SCC "generateJumpTables" #-}
466 generateJumpTables ncgImpl kludged
468 ---- shortcut branches
470 {-# SCC "shortcutBranches" #-}
471 shortcutBranches dflags ncgImpl tabled
475 {-# SCC "sequenceBlocks" #-}
476 map (sequenceTop ncgImpl) shorted
478 ---- expansion of SPARC synthetic instrs
480 {-# SCC "sparc_expand" #-}
481 ncgExpandTop ncgImpl sequenced
484 Opt_D_dump_asm_expanded "Synthetic instructions expanded"
485 (vcat $ map (pprNatCmmDecl ncgImpl platform) expanded)
489 , lastMinuteImports ++ imports
-- | Insert ffree instructions to clear the x87 FPU stack within an x86 proc;
-- data sections pass through unchanged.  See the notes in 'cmmNativeGen'.
494 x86fp_kludge :: NatCmmDecl (Alignment, CmmStatics) X86.Instr.Instr -> NatCmmDecl (Alignment, CmmStatics) X86.Instr.Instr
495 x86fp_kludge top@(CmmData _ _) = top
496 x86fp_kludge (CmmProc info lbl (ListGraph code)) =
497 CmmProc info lbl (ListGraph $ X86.Instr.i386_insert_ffrees code)
500 -- | Build a doc for all the imports.
-- Emits dynamic-library symbol stubs (when the platform needs them) plus
-- trailing assembler directives: .subsections_via_symbols on Darwin, the
-- GNU non-exec-stack note on ELF, and an .ident tag.
502 makeImportsDoc :: DynFlags -> [CLabel] -> SDoc
503 makeImportsDoc dflags imports
506 -- On recent versions of Darwin, the linker supports
507 -- dead-stripping of code and data on a per-symbol basis.
508 -- There's a hack to make this work in PprMach.pprNatCmmDecl.
509 (if platformHasSubsectionsViaSymbols (targetPlatform dflags)
510 then text ".subsections_via_symbols"
513 -- On recent GNU ELF systems one can mark an object file
514 -- as not requiring an executable stack. If all objects
515 -- linked into a program have this note then the program
516 -- will not use an executable stack, which is good for
517 -- security. GHC generated code does not need an executable
518 -- stack so add the note in:
519 (if platformHasGnuNonexecStack (targetPlatform dflags)
520 then text ".section .note.GNU-stack,\"\",@progbits"
523 -- And just because every other compiler does, lets stick in
524 -- an identifier directive: .ident "GHC x.y.z"
525 (if platformHasIdentDirective (targetPlatform dflags)
526 then let compilerIdent = text "GHC" <+> text cProjectVersion
527 in text ".ident" <+> doubleQuotes compilerIdent
531 -- Generate "symbol stubs" for all external symbols that might
532 -- come from a dynamic library.
533 dyld_stubs :: [CLabel] -> SDoc
534 {- dyld_stubs imps = vcat $ map pprDyldSymbolStub $
535 map head $ group $ sort imps-}
537 platform = targetPlatform dflags
538 arch = platformArch platform
539 os = platformOS platform
541 -- (Hack) sometimes two Labels pretty-print the same, but have
542 -- different uniques; so we compare their text versions...
-- sort and group labels by their rendered text, then emit one stub per
-- distinct symbol, preceded by the GOT declaration
544 | needImportedSymbols arch os
546 (pprGotDeclaration arch os :) $
547 map ( pprImportedSymbol platform . fst . head) $
548 groupBy (\(_,a) (_,b) -> a == b) $
549 sortBy (\(_,a) (_,b) -> compare a b) $
-- pair each label with its rendered text for the dedupe above
555 doPpr lbl = (lbl, renderWithStyle dflags (pprCLabel platform lbl) astyle)
556 astyle = mkCodeStyle AsmStyle
559 -- -----------------------------------------------------------------------------
560 -- Sequencing the basic blocks
562 -- Cmm BasicBlocks are self-contained entities: they always end in a
563 -- jump, either non-local or to another basic block in the same proc.
564 -- In this phase, we attempt to place the basic blocks in a sequence
565 -- such that as many of the local jumps as possible turn into
-- | Reorder the blocks of a proc to maximise fallthrough (see
-- 'sequenceBlocks' below), then apply the target's far-branch fixup.
-- Data sections are left alone.
570 => NcgImpl statics instr jumpDest -> NatCmmDecl statics instr -> NatCmmDecl statics instr
572 sequenceTop _ top@(CmmData _ _) = top
573 sequenceTop ncgImpl (CmmProc info lbl (ListGraph blocks)) =
574 CmmProc info lbl (ListGraph $ ncgMakeFarBranches ncgImpl $ sequenceBlocks blocks)
576 -- The algorithm is very simple (and stupid): we make a graph out of
577 -- the blocks where there is an edge from one block to another iff the
578 -- first block ends by jumping to the second. Then we topologically
579 -- sort this graph. Then traverse the list: for each block, we first
580 -- output the block, then if it has an out edge, we move the
581 -- destination of the out edge to the front of the list, and continue.
583 -- FYI, the classic layout for basic blocks uses postorder DFS; this
584 -- algorithm is implemented in Hoopl.
588 => [NatBasicBlock instr]
589 -> [NatBasicBlock instr]
591 sequenceBlocks [] = []
592 sequenceBlocks (entry:blocks) =
593 seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
594 -- the first block is the entry point ==> it must remain at the start.
-- Build the jump graph over the non-entry blocks as SCCs.
599 => [NatBasicBlock instr]
600 -> [SCC ( NatBasicBlock instr
604 sccBlocks blocks = stronglyConnCompFromEdgedVerticesR (map mkNode blocks)
606 -- we're only interested in the last instruction of
607 -- the block, and only if it has a single destination.
610 => [instr] -> [Unique]
613 = case jumpDestsOfInstr (last instrs) of
614 [one] -> [getUnique one]
-- graph node: (block, its id, ids of its single-jump successors)
617 mkNode :: (Instruction t)
619 -> (GenBasicBlock t, Unique, [Unique])
620 mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)
-- Walk the node list; whenever a block's sole successor can be moved to the
-- front of the remaining list, drop the block's final jump ('init instrs')
-- and fall through into it instead.
622 seqBlocks :: (Eq t) => [(GenBasicBlock t1, t, [t])] -> [GenBasicBlock t1]
624 seqBlocks ((block,_,[]) : rest)
625 = block : seqBlocks rest
626 seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
627 | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
628 | otherwise = block : seqBlocks rest'
630 (can_fallthrough, rest') = reorder next [] rest
631 -- TODO: we should do a better job for cycles; try to maximise the
632 -- fallthroughs within a loop.
633 seqBlocks _ = panic "AsmCodegen:seqBlocks"
-- Search the list for the node carrying id 'id'; if found, move it to the
-- front and report True so the caller can fall through into it.
635 reorder :: (Eq a) => a -> [(t, a, t1)] -> [(t, a, t1)] -> (Bool, [(t, a, t1)])
636 reorder _ accum [] = (False, reverse accum)
637 reorder id accum (b@(block,id',out) : rest)
638 | id == id' = (True, (block,id,out) : reverse accum ++ rest)
639 | otherwise = reorder id (b:accum) rest
642 -- -----------------------------------------------------------------------------
643 -- Making far branches
645 -- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
646 -- big, we have to work around this limitation.
-- Rewrites BCC to BCCFAR when the (approximate, instruction-count based)
-- distance to the target is at least nearLimit.  Procs whose total length
-- is under the limit are returned untouched.
649 :: [NatBasicBlock PPC.Instr.Instr]
650 -> [NatBasicBlock PPC.Instr.Instr]
651 makeFarBranches blocks
652 | last blockAddresses < nearLimit = blocks
653 | otherwise = zipWith handleBlock blockAddresses blocks
-- running instruction counts give each block's (approximate) address
655 blockAddresses = scanl (+) 0 $ map blockLen blocks
656 blockLen (BasicBlock _ instrs) = length instrs
658 handleBlock addr (BasicBlock id instrs)
659 = BasicBlock id (zipWith makeFar [addr..] instrs)
-- always-taken branches are never rewritten
661 makeFar _ (PPC.Instr.BCC PPC.Cond.ALWAYS tgt) = PPC.Instr.BCC PPC.Cond.ALWAYS tgt
662 makeFar addr (PPC.Instr.BCC cond tgt)
663 | abs (addr - targetAddr) >= nearLimit
664 = PPC.Instr.BCCFAR cond tgt
666 = PPC.Instr.BCC cond tgt
667 where Just targetAddr = lookupUFM blockAddressMap tgt
668 makeFar _ other = other
670 nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
671 -- distance, as we have a few pseudo-insns that are
672 -- pretty-printed as multiple instructions,
673 -- and it's just not worth the effort to calculate
676 blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
678 -- -----------------------------------------------------------------------------
679 -- Generate jump tables
681 -- Analyzes all native code and generates data sections for all jump
682 -- table instructions.
-- Each generated table is emitted as a separate decl directly after its proc.
684 :: NcgImpl statics instr jumpDest
685 -> [NatCmmDecl statics instr] -> [NatCmmDecl statics instr]
686 generateJumpTables ncgImpl xs = concatMap f xs
687 where f p@(CmmProc _ _ (ListGraph xs)) = p : concatMap g xs
689 g (BasicBlock _ xs) = catMaybes (map (generateJumpTableForInstr ncgImpl) xs)
691 -- -----------------------------------------------------------------------------
-- Shortcut branches: rewrite jumps-to-jumps to jump straight to the final
-- destination.  Per proc we build a BlockId -> jumpDest map ('build_mapping'),
-- union the maps, and apply the result everywhere ('apply_mapping').
696 -> NcgImpl statics instr jumpDest
697 -> [NatCmmDecl statics instr]
698 -> [NatCmmDecl statics instr]
700 shortcutBranches dflags ncgImpl tops
701 | optLevel dflags < 1 = tops -- only with -O or higher
702 | otherwise = map (apply_mapping ncgImpl mapping) tops'
704 (tops', mappings) = mapAndUnzip (build_mapping ncgImpl) tops
705 mapping = foldr plusUFM emptyUFM mappings
-- Build, for one proc, the BlockId -> jumpDest map of blocks consisting of
-- nothing but a shortcut-able jump, and drop those blocks from the proc.
707 build_mapping :: NcgImpl statics instr jumpDest
708 -> GenCmmDecl d t (ListGraph instr)
709 -> (GenCmmDecl d t (ListGraph instr), UniqFM jumpDest)
710 build_mapping _ top@(CmmData _ _) = (top, emptyUFM)
711 build_mapping _ (CmmProc info lbl (ListGraph []))
712 = (CmmProc info lbl (ListGraph []), emptyUFM)
713 build_mapping ncgImpl (CmmProc info lbl (ListGraph (head:blocks)))
714 = (CmmProc info lbl (ListGraph (head:others)), mapping)
715 -- drop the shorted blocks, but don't ever drop the first one,
716 -- because it is pointed to by a global label.
718 -- find all the blocks that just consist of a jump that can be
720 -- Don't completely eliminate loops here -- that can leave a dangling jump!
721 (_, shortcut_blocks, others) = foldl split (emptyBlockSet, [], []) blocks
-- first clause: the destination was already shorted, or the block jumps
-- to itself -- keep the block to avoid breaking a loop
722 split (s, shortcut_blocks, others) b@(BasicBlock id [insn])
723 | Just jd <- canShortcut ncgImpl insn,
724 Just dest <- getJumpDestBlockId ncgImpl jd,
725 (setMember dest s) || dest == id -- loop checks
726 = (s, shortcut_blocks, b : others)
727 split (s, shortcut_blocks, others) (BasicBlock id [insn])
728 | Just dest <- canShortcut ncgImpl insn
729 = (setInsert id s, (id,dest) : shortcut_blocks, others)
730 split (s, shortcut_blocks, others) other = (s, shortcut_blocks, other : others)
733 -- build a mapping from BlockId to JumpDest for shorting branches
734 mapping = foldl add emptyUFM shortcut_blocks
735 add ufm (id,dest) = addToUFM ufm id dest
-- Apply the shortcut mapping to every instruction (and static) in a decl.
737 apply_mapping :: NcgImpl statics instr jumpDest
739 -> GenCmmDecl statics h (ListGraph instr)
740 -> GenCmmDecl statics h (ListGraph instr)
741 apply_mapping ncgImpl ufm (CmmData sec statics)
742 = CmmData sec (shortcutStatics ncgImpl (lookupUFM ufm) statics)
743 apply_mapping ncgImpl ufm (CmmProc info lbl (ListGraph blocks))
744 = CmmProc info lbl (ListGraph $ map short_bb blocks)
-- note the $! -- the rewritten instruction list is forced eagerly
746 short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
747 short_insn i = shortcutJump ncgImpl (lookupUFM ufm) i
748 -- shortcutJump should apply the mapping repeatedly,
749 -- just in case we can short multiple branches.
751 -- -----------------------------------------------------------------------------
752 -- Instruction selection
754 -- Native code instruction selection for a chunk of stix code. For
755 -- this part of the computation, we switch from the UniqSM monad to
756 -- the NatM monad. The latter carries not only a Unique, but also an
757 -- Int denoting the current C stack pointer offset in the generated
758 -- code; this is needed for creating correct spill offsets on
759 -- architectures which don't offer, or for which it would be
760 -- prohibitively expensive to employ, a frame pointer register. Viz,
763 -- The offset is measured in bytes, and indicates the difference
764 -- between the current (simulated) C stack-ptr and the value it was at
765 -- the beginning of the block. For stacks which grow down, this value
766 -- should be either zero or negative.
768 -- Switching between the two monads whilst carrying along the same
769 -- Unique supply breaks abstraction. Is that bad?
-- Runs the given top-level code generator in a fresh NatM state (delta 0)
-- and sanity-checks that the simulated C stack delta returns to zero.
773 -> (RawCmmDecl -> NatM [NatCmmDecl statics instr])
776 ( [NatCmmDecl statics instr]
779 genMachCode dflags cmmTopCodeGen cmm_top
780 = do { initial_us <- getUs
781 ; let initial_st = mkNatM_State initial_us 0 dflags
782 (new_tops, final_st) = initNat initial_st (cmmTopCodeGen cmm_top)
783 final_delta = natm_delta final_st
784 final_imports = natm_imports final_st
785 ; if final_delta == 0
786 then return (new_tops, final_imports)
787 else pprPanic "genMachCode: nonzero final delta" (int final_delta)
790 -- -----------------------------------------------------------------------------
791 -- Generic Cmm optimiser
797 (b) Simple inlining: a temporary which is assigned to and then
798 used, once, can be shorted.
799 (c) Position independent code and dynamic linking
800 (i) introduce the appropriate indirections
801 and position independent refs
802 (ii) compile a list of imported symbols
803 (d) Some arch-specific optimizations
805 (a) and (b) will be moving to the new Hoopl pipeline, however, (c) and
806 (d) are only needed by the native backend and will continue to live
809 Ideas for other things we could do (put these in Hoopl please!):
811 - shortcut jumps-to-jumps
812 - simple CSE: if an expr is assigned to a temp, then replace later occs of
813 that expr with the temp, until the expr is no longer valid (can push through
814 temp assignments, and certain assigns to mem...)
-- | Run the generic Cmm-to-Cmm optimisations on one decl: dead-block
-- elimination, mini-inlining, then per-block constant folding / PIC
-- rewriting via 'cmmBlockConFold'.  Also collects any imported labels
-- recorded along the way.  Data decls pass through with no imports.
817 cmmToCmm :: DynFlags -> RawCmmDecl -> (RawCmmDecl, [CLabel])
818 cmmToCmm _ top@(CmmData _ _) = (top, [])
819 cmmToCmm dflags (CmmProc info lbl (ListGraph blocks)) = runCmmOpt dflags $ do
820 blocks' <- mapM cmmBlockConFold (cmmMiniInline dflags (cmmEliminateDeadBlocks blocks))
821 return $ CmmProc info lbl (ListGraph blocks')
-- The Cmm optimiser's monad: reads DynFlags and threads a list of imported
-- CLabels through each step (the unboxed-tuple result carries the updated
-- list alongside the value).
823 newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))
825 instance Monad CmmOptM where
826 return x = CmmOptM $ \(imports, _) -> (# x,imports #)
828 CmmOptM $ \(imports, dflags) ->
829 case f (imports, dflags) of
832 CmmOptM g' -> g' (imports', dflags)
-- Prepend a label to the accumulated import list.
834 addImportCmmOpt :: CLabel -> CmmOptM ()
835 addImportCmmOpt lbl = CmmOptM $ \(imports, _dflags) -> (# (), lbl:imports #)
837 instance HasDynFlags CmmOptM where
838 getDynFlags = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)
-- Run a CmmOptM action with an empty import list, returning the result and
-- all labels accumulated via addImportCmmOpt.
840 runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
841 runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
842 (# result, imports #) -> (result, imports)
-- Apply 'cmmStmtConFold' to every statement of a basic block.
844 cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
845 cmmBlockConFold (BasicBlock id stmts) = do
846 stmts' <- mapM cmmStmtConFold stmts
847 return $ BasicBlock id stmts'
849 -- This does three optimizations, but they're very quick to check, so we don't
850 -- bother turning them off even when the Hoopl code is active. Since
851 -- this is on the old Cmm representation, we can't reuse the code either:
852 -- * reg = reg --> nop
853 -- * if 0 then jump --> nop
854 -- * if 1 then jump --> jump
855 -- We might be tempted to skip this step entirely if not opt_PIC, but
856 -- there is some PowerPC code for the non-PIC case, which would also
857 -- have to be separated.
-- Fold constants / rewrite PIC references in a single statement via
-- cmmExprConFold, applying the three quick clean-ups described above
-- (reg=reg, if-0, if-1).  Recurses into the statement lists of CmmPrim
-- calls.
858 cmmStmtConFold :: CmmStmt -> CmmOptM CmmStmt
862 -> do src' <- cmmExprConFold DataReference src
863 return $ case src' of
-- assigning a register to itself is a no-op
864 CmmReg reg' | reg == reg' -> CmmNop
865 new_src -> CmmAssign reg new_src
868 -> do addr' <- cmmExprConFold DataReference addr
869 src' <- cmmExprConFold DataReference src
870 return $ CmmStore addr' src'
873 -> do addr' <- cmmExprConFold JumpReference addr
874 return $ CmmJump addr' live
876 CmmCall target regs args returns
877 -> do target' <- case target of
878 CmmCallee e conv -> do
879 e' <- cmmExprConFold CallReference e
880 return $ CmmCallee e' conv
881 op@(CmmPrim _ Nothing) ->
883 CmmPrim op (Just stmts) ->
884 do stmts' <- mapM cmmStmtConFold stmts
885 return $ CmmPrim op (Just stmts')
886 args' <- mapM (\(CmmHinted arg hint) -> do
887 arg' <- cmmExprConFold DataReference arg
888 return (CmmHinted arg' hint)) args
889 return $ CmmCall target' regs args' returns
891 CmmCondBranch test dest
892 -> do test' <- cmmExprConFold DataReference test
893 dflags <- getDynFlags
894 let platform = targetPlatform dflags
895 return $ case test' of
-- condition folded to literal 0: branch can never be taken,
-- so delete it (leaving a comment with the original statement)
896 CmmLit (CmmInt 0 _) ->
897 CmmComment (mkFastString ("deleted: " ++
898 showSDoc dflags (pprStmt platform stmt)))
-- condition folded to any other literal: always taken,
-- so turn it into an unconditional branch
900 CmmLit (CmmInt _ _) -> CmmBranch dest
901 _other -> CmmCondBranch test' dest
904 -> do expr' <- cmmExprConFold DataReference expr
905 return $ CmmSwitch expr' ids
-- Constant-fold an expression (unless the new code generator is active, in
-- which case Hoopl already did it), then apply the native/PIC rewrites of
-- 'cmmExprNative'.
910 cmmExprConFold :: ReferenceKind -> CmmExpr -> CmmOptM CmmExpr
911 cmmExprConFold referenceKind expr = do
912 dflags <- getDynFlags
913 -- Skip constant folding if new code generator is running
914 -- (this optimization is done in Hoopl)
915 let expr' = if dopt Opt_TryNewCodeGen dflags
917 else cmmExprCon (targetPlatform dflags) expr
918 cmmExprNative referenceKind expr'
-- Bottom-up constant folding over an expression, using 'cmmMachOpFold' at
-- each machine-op node; other expression forms are left unchanged.
920 cmmExprCon :: Platform -> CmmExpr -> CmmExpr
921 cmmExprCon platform (CmmLoad addr rep) = CmmLoad (cmmExprCon platform addr) rep
922 cmmExprCon platform (CmmMachOp mop args)
923 = cmmMachOpFold platform mop (map (cmmExprCon platform) args)
924 cmmExprCon _ other = other
926 -- handles both PIC and non-PIC cases... a very strange mixture
-- NOTE(review): this definition appears to continue beyond the end of this
-- excerpt (further case alternatives are likely not visible here).
928 cmmExprNative :: ReferenceKind -> CmmExpr -> CmmOptM CmmExpr
929 cmmExprNative referenceKind expr = do
930 dflags <- getDynFlags
931 let platform = targetPlatform dflags
932 arch = platformArch platform
935 -> do addr' <- cmmExprNative DataReference addr
936 return $ CmmLoad addr' rep
939 -> do args' <- mapM (cmmExprNative DataReference) args
940 return $ CmmMachOp mop args'
-- labels may need to become dynamic references (PIC / dynamic linking);
-- any resulting import is recorded via addImportCmmOpt
942 CmmLit (CmmLabel lbl)
944 cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
945 CmmLit (CmmLabelOff lbl off)
947 dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
948 -- need to optimize here, since it's late
949 return $ cmmMachOpFold platform (MO_Add wordWidth) [
951 (CmmLit $ CmmInt (fromIntegral off) wordWidth)
954 -- On powerpc (non-PIC), it's easier to jump directly to a label than
955 -- to use the register table, so we replace these registers
956 -- with the corresponding labels:
957 CmmReg (CmmGlobal EagerBlackholeInfo)
958 | arch == ArchPPC && not opt_PIC
959 -> cmmExprNative referenceKind $
960 CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_EAGER_BLACKHOLE_info")))
961 CmmReg (CmmGlobal GCEnter1)
962 | arch == ArchPPC && not opt_PIC
963 -> cmmExprNative referenceKind $
964 CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_enter_1")))
965 CmmReg (CmmGlobal GCFun)
966 | arch == ArchPPC && not opt_PIC
967 -> cmmExprNative referenceKind $
968 CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_fun")))