-- -----------------------------------------------------------------------------
-- (c) The University of Glasgow 1993-2004
-- This is the top-level module in the native code generator.
-- -----------------------------------------------------------------------------

{-# OPTIONS -fno-warn-tabs #-}
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
-- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
-- for details

module AsmCodeGen ( nativeCodeGen ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"
import qualified X86.CodeGen
import qualified X86.Regs
import qualified X86.Instr
import qualified X86.Ppr

import qualified SPARC.CodeGen
import qualified SPARC.Regs
import qualified SPARC.Instr
import qualified SPARC.Ppr
import qualified SPARC.ShortcutJump
import qualified SPARC.CodeGen.Expand

import qualified PPC.CodeGen
import qualified PPC.Cond
import qualified PPC.Regs
import qualified PPC.RegInfo
import qualified PPC.Instr
import qualified PPC.Ppr

import RegAlloc.Liveness
import qualified RegAlloc.Linear.Main           as Linear

import qualified GraphColor                     as Color
import qualified RegAlloc.Graph.Main            as Color
import qualified RegAlloc.Graph.Stats           as Color
import qualified RegAlloc.Graph.TrivColorable   as Color

import CgUtils          ( fixStgRegisters )
import CmmOpt           ( cmmEliminateDeadBlocks, cmmMiniInline, cmmMachOpFold )
import Unique           ( Unique, getUnique )
import BasicTypes       ( Alignment )
import qualified Pretty
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module.  Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the Cmm code.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers.  We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality.  Obviously, 'regAlloc' has machine-specific helper
functions (see about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.
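
As a rough sketch (schematic only; the real passes below are monadic and
thread a unique supply), each top-level Cmm declaration flows through:

    fixStgRegisters >>> cmmToCmm >>> genMachCode >>> regLiveness
        >>> regAlloc >>> shortcutBranches >>> sequenceTop >>> pprNatCmmDecl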
The machine-dependent bits break down as follows:

  * ["MachRegs"]  Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"]  Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"]  is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"]  'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"]  In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit.  Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"]  The (machine-independent) register allocator.
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen

data NcgImpl statics instr jumpDest = NcgImpl {
    cmmTopCodeGen             :: RawCmmDecl -> NatM [NatCmmDecl statics instr],
    generateJumpTableForInstr :: instr -> Maybe (NatCmmDecl statics instr),
    getJumpDestBlockId        :: jumpDest -> Maybe BlockId,
    canShortcut               :: instr -> Maybe jumpDest,
    shortcutStatics           :: (BlockId -> Maybe jumpDest) -> statics -> statics,
    shortcutJump              :: (BlockId -> Maybe jumpDest) -> instr -> instr,
    pprNatCmmDecl             :: Platform -> NatCmmDecl statics instr -> Doc,
    maxSpillSlots             :: Int,
    allocatableRegs           :: [RealReg],
    ncg_x86fp_kludge          :: [NatCmmDecl statics instr] -> [NatCmmDecl statics instr],
    ncgExpandTop              :: [NatCmmDecl statics instr] -> [NatCmmDecl statics instr],
    ncgMakeFarBranches        :: [NatBasicBlock instr] -> [NatBasicBlock instr]
    }
nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmmGroup] -> IO ()
nativeCodeGen dflags h us cmms
 = let platform = targetPlatform dflags
       nCG' :: (PlatformOutputable statics, PlatformOutputable instr, Instruction instr)
            => NcgImpl statics instr jumpDest -> IO ()
       nCG' ncgImpl = nativeCodeGen' dflags ncgImpl h us cmms
       x86NcgImpl = NcgImpl {
                         cmmTopCodeGen             = X86.CodeGen.cmmTopCodeGen
                        ,generateJumpTableForInstr = X86.CodeGen.generateJumpTableForInstr
                        ,getJumpDestBlockId        = X86.Instr.getJumpDestBlockId
                        ,canShortcut               = X86.Instr.canShortcut
                        ,shortcutStatics           = X86.Instr.shortcutStatics
                        ,shortcutJump              = X86.Instr.shortcutJump
                        ,pprNatCmmDecl             = X86.Ppr.pprNatCmmDecl
                        ,maxSpillSlots             = X86.Instr.maxSpillSlots (target32Bit platform)
                        ,allocatableRegs           = X86.Regs.allocatableRegs
                        ,ncg_x86fp_kludge          = id
                        ,ncgExpandTop              = id
                        ,ncgMakeFarBranches        = id
                    }
   in case platformArch platform of
                 ArchX86    -> nCG' (x86NcgImpl { ncg_x86fp_kludge = map x86fp_kludge })
                 ArchX86_64 -> nCG' x86NcgImpl
                 ArchPPC ->
                     nCG' $ NcgImpl {
                          cmmTopCodeGen             = PPC.CodeGen.cmmTopCodeGen
                         ,generateJumpTableForInstr = PPC.CodeGen.generateJumpTableForInstr
                         ,getJumpDestBlockId        = PPC.RegInfo.getJumpDestBlockId
                         ,canShortcut               = PPC.RegInfo.canShortcut
                         ,shortcutStatics           = PPC.RegInfo.shortcutStatics
                         ,shortcutJump              = PPC.RegInfo.shortcutJump
                         ,pprNatCmmDecl             = PPC.Ppr.pprNatCmmDecl
                         ,maxSpillSlots             = PPC.Instr.maxSpillSlots
                         ,allocatableRegs           = PPC.Regs.allocatableRegs
                         ,ncg_x86fp_kludge          = id
                         ,ncgExpandTop              = id
                         ,ncgMakeFarBranches        = makeFarBranches
                     }
                 ArchSPARC ->
                     nCG' $ NcgImpl {
                          cmmTopCodeGen             = SPARC.CodeGen.cmmTopCodeGen
                         ,generateJumpTableForInstr = SPARC.CodeGen.generateJumpTableForInstr
                         ,getJumpDestBlockId        = SPARC.ShortcutJump.getJumpDestBlockId
                         ,canShortcut               = SPARC.ShortcutJump.canShortcut
                         ,shortcutStatics           = SPARC.ShortcutJump.shortcutStatics
                         ,shortcutJump              = SPARC.ShortcutJump.shortcutJump
                         ,pprNatCmmDecl             = SPARC.Ppr.pprNatCmmDecl
                         ,maxSpillSlots             = SPARC.Instr.maxSpillSlots
                         ,allocatableRegs           = SPARC.Regs.allocatableRegs
                         ,ncg_x86fp_kludge          = id
                         ,ncgExpandTop              = map SPARC.CodeGen.Expand.expandTop
                         ,ncgMakeFarBranches        = id
                     }
                 ArchARM {} ->
                     panic "nativeCodeGen: No NCG for ARM"
                 ArchPPC_64 ->
                     panic "nativeCodeGen: No NCG for PPC 64"
                 ArchUnknown ->
                     panic "nativeCodeGen: No NCG for unknown arch"
nativeCodeGen' :: (PlatformOutputable statics, PlatformOutputable instr, Instruction instr)
               => DynFlags
               -> NcgImpl statics instr jumpDest
               -> Handle -> UniqSupply -> [RawCmmGroup] -> IO ()
nativeCodeGen' dflags ncgImpl h us cmms
 = do
        let platform = targetPlatform dflags
            split_cmms = concat $ map add_split cmms
        -- BufHandle is a performance hack.  We could hide it inside
        -- Pretty if it weren't for the fact that we do lots of little
        -- printDocs here (in order to do codegen in constant space).
        bufh <- newBufHandle h
        (imports, prof) <- cmmNativeGens dflags ncgImpl bufh us split_cmms [] [] 0
        bFlush bufh

        let (native, colorStats, linearStats)
                = unzip3 prof

        -- dump native code
        dumpIfSet_dyn dflags
                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmDecl ncgImpl platform) $ concat native)

        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of
          []    -> return ()
          stats -> do
                -- build the global register conflict graph
                let graphGlobal
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        $ Color.dotGraph
                                (targetRegDotColor platform)
                                (Color.trivColorable platform
                                        (targetVirtualRegSqueeze platform)
                                        (targetRealRegSqueeze platform))
                        $ graphGlobal)

        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
          []    -> return ()
          stats -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Linear.pprStats (concat native) stats)
        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc dflags (concat imports)

        return  ()

 where  add_split tops
                | dopt Opt_SplitObjs dflags = split_marker : tops
                | otherwise                 = tops

        split_marker = CmmProc Nothing mkSplitMarkerLabel (ListGraph [])
-- | Do native code generation on all these cmms.
cmmNativeGens :: (PlatformOutputable statics, PlatformOutputable instr, Instruction instr)
              => DynFlags
              -> NcgImpl statics instr jumpDest
              -> BufHandle
              -> UniqSupply
              -> [RawCmmGroup]
              -> [[CLabel]]
              -> [ ([NatCmmDecl statics instr],
                    Maybe [Color.RegAllocStats statics instr],
                    Maybe [Linear.RegAllocStats]) ]
              -> Int
              -> IO ( [[CLabel]],
                      [([NatCmmDecl statics instr],
                        Maybe [Color.RegAllocStats statics instr],
                        Maybe [Linear.RegAllocStats])] )

cmmNativeGens _ _ _ _ [] impAcc profAcc _
        = return (reverse impAcc, reverse profAcc)
cmmNativeGens dflags ncgImpl h us (cmm : cmms) impAcc profAcc count
 = do
        let platform = targetPlatform dflags

        (us', native, imports, colorStats, linearStats)
                <- {-# SCC "cmmNativeGen" #-} cmmNativeGen dflags ncgImpl us cmm count

        {-# SCC "pprNativeCode" #-} Pretty.bufLeftRender h
                $ Pretty.vcat $ map (pprNatCmmDecl ncgImpl platform) native

        -- carefully evaluate this strictly.  Binding it with 'let'
        -- and then using 'seq' doesn't work, because the let
        -- apparently gets inlined first.
        lsPprNative <- return $!
                if  dopt Opt_D_dump_asm dflags
                 || dopt Opt_D_dump_asm_stats dflags
                        then native
                        else []

        count' <- return $! count + 1;

        -- force evaluation of all this stuff to avoid space leaks
        {-# SCC "seqString" #-} seqString (showSDoc $ vcat $ map (pprPlatform platform) imports) `seq` return ()

        cmmNativeGens dflags ncgImpl h us' cmms
                        (imports : impAcc)
                        ((lsPprNative, colorStats, linearStats) : profAcc)
                        count'

 where  seqString []     = ()
        seqString (x:xs) = x `seq` seqString xs `seq` ()
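        -- For instance, @seqString "ab"@ forces 'a', then 'b', then returns
        -- (); deep-forcing the rendered import list this way stops the whole
        -- String being retained as one large thunk.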
-- | Complete native code generation phase for a single top-level chunk of Cmm.
--      Dumps the output of each stage along the way.
--      Also returns the global conflict graph and NCG stats.
cmmNativeGen
        :: (PlatformOutputable statics, PlatformOutputable instr, Instruction instr)
        => DynFlags
        -> NcgImpl statics instr jumpDest
        -> UniqSupply
        -> RawCmmDecl                                   -- ^ the cmm to generate code for
        -> Int                                          -- ^ sequence number of this top thing
        -> IO   ( UniqSupply
                , [NatCmmDecl statics instr]                -- native code
                , [CLabel]                                  -- things imported by this cmm
                , Maybe [Color.RegAllocStats statics instr] -- stats for the coloring register allocator
                , Maybe [Linear.RegAllocStats])             -- stats for the linear register allocator
cmmNativeGen dflags ncgImpl us cmm count
 = do
        let platform = targetPlatform dflags

        -- rewrite assignments to global regs
        let fixed_cmm =
                {-# SCC "fixStgRegisters" #-}
                fixStgRegisters cmm

        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmmGroup platform [opt_cmm])

        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs us $ genMachCode dflags (cmmTopCodeGen ncgImpl) opt_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmDecl ncgImpl platform) native)

        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                initUs usGen
                        $ mapUs (regLiveness platform)
                        $ map natCmmTopToLive native

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map (pprPlatform platform) withLiveness)
        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if ( dopt Opt_RegsGraph dflags
           || dopt Opt_RegsIterative dflags)
          then do
                -- the regs usable for allocation
                let (alloc_regs :: UniqFM (UniqSet RealReg))
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (targetClassOfRealReg platform r) (unitUniqSet r))
                                emptyUFM
                        $ allocatableRegs ncgImpl

                -- do the graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ Color.regAlloc
                                dflags
                                alloc_regs
                                (mkUniqSet [0 .. maxSpillSlots ncgImpl])
                                withLiveness

                -- dump out what happened during register allocation
                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmDecl ncgImpl platform) alloced)

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat $ map (\(stage, stats)
                                        -> text "# --------------------------"
                                        $$ text "# cmm " <> int count <> text " Stage " <> int stage
                                        $$ pprPlatform platform stats)
                                $ zip [0..] regAllocStats)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , mPprStats
                        , Nothing)
          else do
                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ liftM unzip
                          $ mapUs (Linear.regAlloc dflags) withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmDecl ncgImpl platform) alloced)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , Nothing
                        , mPprStats)
        ---- x86fp_kludge.  This pass inserts ffree instructions to clear
        ---- the FPU stack on x86.  The x86 ABI requires that the FPU stack
        ---- is clear, and library functions can return odd results if it
        ---- isn't.
        ----
        ---- NB. must happen before shortcutBranches, because that
        ---- generates JXX_GBLs which we can't fix up in x86fp_kludge.
        let kludged = {-# SCC "x86fp_kludge" #-} ncg_x86fp_kludge ncgImpl alloced

        ---- generate jump tables
        let tabled      =
                {-# SCC "generateJumpTables" #-}
                generateJumpTables ncgImpl kludged

        ---- shortcut branches
        let shorted     =
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags ncgImpl tabled

        ---- sequence blocks
        let sequenced   =
                {-# SCC "sequenceBlocks" #-}
                map (sequenceTop ncgImpl) shorted

        ---- expansion of SPARC synthetic instrs
        let expanded    =
                {-# SCC "sparc_expand" #-}
                ncgExpandTop ncgImpl sequenced

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_expanded "Synthetic instructions expanded"
                (vcat $ map (docToSDoc . pprNatCmmDecl ncgImpl platform) expanded)

        return  ( usAlloc
                , expanded
                , lastMinuteImports ++ imports
                , ppr_raStatsColor
                , ppr_raStatsLinear)
x86fp_kludge :: NatCmmDecl (Alignment, CmmStatics) X86.Instr.Instr -> NatCmmDecl (Alignment, CmmStatics) X86.Instr.Instr
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge (CmmProc info lbl (ListGraph code)) =
        CmmProc info lbl (ListGraph $ X86.Instr.i386_insert_ffrees code)
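
-- Schematically (illustrative only; the real rewrite lives in
-- X86.Instr.i386_insert_ffrees): a proc whose code would otherwise
-- return while a value is still parked on the x87 stack gets ffree
-- instructions inserted before the return, so the FPU stack is empty
-- again when control leaves the proc.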
-- | Build a doc for all the imports.
makeImportsDoc :: DynFlags -> [CLabel] -> Pretty.Doc
makeImportsDoc dflags imports
 = dyld_stubs imports
            Pretty.$$
            -- On recent versions of Darwin, the linker supports
            -- dead-stripping of code and data on a per-symbol basis.
            -- There's a hack to make this work in PprMach.pprNatCmmDecl.
            (if platformHasSubsectionsViaSymbols (targetPlatform dflags)
             then Pretty.text ".subsections_via_symbols"
             else Pretty.empty)
            Pretty.$$
                -- On recent GNU ELF systems one can mark an object file
                -- as not requiring an executable stack. If all objects
                -- linked into a program have this note then the program
                -- will not use an executable stack, which is good for
                -- security. GHC generated code does not need an executable
                -- stack so add the note in:
            (if platformHasGnuNonexecStack (targetPlatform dflags)
             then Pretty.text ".section .note.GNU-stack,\"\",@progbits"
             else Pretty.empty)
            Pretty.$$
                -- And just because every other compiler does, let's stick in
                -- an identifier directive: .ident "GHC x.y.z"
            (if platformHasIdentDirective (targetPlatform dflags)
             then let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                      Pretty.text cProjectVersion
                   in Pretty.text ".ident" Pretty.<+>
                      Pretty.doubleQuotes compilerIdent
             else Pretty.empty)

 where
        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                    map head $ group $ sort imps-}

        platform = targetPlatform dflags
        arch = platformArch platform
        os   = platformOS   platform

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
        dyld_stubs imps
                | needImportedSymbols arch os
                = Pretty.vcat $
                        (pprGotDeclaration arch os :) $
                        map ( pprImportedSymbol platform . fst . head) $
                        groupBy (\(_,a) (_,b) -> a == b) $
                        sortBy (\(_,a) (_,b) -> compare a b) $
                        map doPpr $
                        imps
                | otherwise
                = Pretty.empty

        doPpr lbl = (lbl, renderWithStyle (pprCLabel platform lbl) astyle)
        astyle = mkCodeStyle AsmStyle
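
        -- On an ELF platform the rendered doc looks roughly like:
        --
        --      .section .note.GNU-stack,"",@progbits
        --      .ident "GHC 7.4.1"
        --
        -- (the version string is illustrative; it really comes from
        -- cProjectVersion above)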
-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.

sequenceTop
        :: Instruction instr
        => NcgImpl statics instr jumpDest -> NatCmmDecl statics instr -> NatCmmDecl statics instr

sequenceTop _       top@(CmmData _ _) = top
sequenceTop ncgImpl (CmmProc info lbl (ListGraph blocks)) =
  CmmProc info lbl (ListGraph $ ncgMakeFarBranches ncgImpl $ sequenceBlocks blocks)
-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.

-- FYI, the classic layout for basic blocks uses postorder DFS; this
-- algorithm is implemented in Hoopl.
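
-- A small worked example (schematic): given blocks
--
--      A: ... jmp C        B: ...        C: ... jmp B
--
-- we can emit the sequence A, C, B, whereupon the A->C and C->B jumps
-- become fallthroughs and 'seqBlocks' drops the now-redundant jump
-- instructions.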
sequenceBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [NatBasicBlock instr]

sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.

sccBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [SCC ( NatBasicBlock instr
                , Unique
                , [Unique])]

sccBlocks blocks = stronglyConnCompFromEdgedVerticesR (map mkNode blocks)

-- we're only interested in the last instruction of
-- the block, and only if it has a single destination.
getOutEdges
        :: Instruction instr
        => [instr] -> [Unique]

getOutEdges instrs
        = case jumpDestsOfInstr (last instrs) of
                [one] -> [getUnique one]
                _many -> []

mkNode :: (Instruction t)
       => GenBasicBlock t
       -> (GenBasicBlock t, Unique, [Unique])
mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)
seqBlocks :: (Eq t) => [(GenBasicBlock t1, t, [t])] -> [GenBasicBlock t1]
seqBlocks [] = []
seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
  where
        (can_fallthrough, rest') = reorder next [] rest
        -- TODO: we should do a better job for cycles; try to maximise the
        -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"

reorder :: (Eq a) => a -> [(t, a, t1)] -> [(t, a, t1)] -> (Bool, [(t, a, t1)])
reorder  _ accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest
-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.
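
-- Concretely, 'makeFar' below rewrites a conditional branch whose target
-- is at least 'nearLimit' instructions away from
--
--      BCC cond target         (limited to a +-32KB displacement)
--
-- to @BCCFAR cond target@, which the PPC pretty-printer expands into a
-- short branch on the negated condition over an unconditional jump.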
makeFarBranches
        :: [NatBasicBlock PPC.Instr.Instr]
        -> [NatBasicBlock PPC.Instr.Instr]
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar _ (PPC.Instr.BCC PPC.Cond.ALWAYS tgt) = PPC.Instr.BCC PPC.Cond.ALWAYS tgt
        makeFar addr (PPC.Instr.BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            = PPC.Instr.BCCFAR cond tgt
            | otherwise
            = PPC.Instr.BCC cond tgt
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar _ other = other

        nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
                         -- distance, as we have a few pseudo-insns that are
                         -- pretty-printed as multiple instructions,
                         -- and it's just not worth the effort to calculate
                         -- things exactly

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
-- -----------------------------------------------------------------------------
-- Generate jump tables

-- Analyzes all native code and generates data sections for all jump
-- table instructions.
generateJumpTables
        :: NcgImpl statics instr jumpDest
        -> [NatCmmDecl statics instr] -> [NatCmmDecl statics instr]
generateJumpTables ncgImpl xs = concatMap f xs
    where f p@(CmmProc _ _ (ListGraph xs)) = p : concatMap g xs
          f p = [p]
          g (BasicBlock _ xs) = catMaybes (map (generateJumpTableForInstr ncgImpl) xs)
-- -----------------------------------------------------------------------------
-- Shortcut branches

shortcutBranches
        :: DynFlags
        -> NcgImpl statics instr jumpDest
        -> [NatCmmDecl statics instr]
        -> [NatCmmDecl statics instr]

shortcutBranches dflags ncgImpl tops
  | optLevel dflags < 1 = tops    -- only with -O or higher
  | otherwise           = map (apply_mapping ncgImpl mapping) tops'
  where
    (tops', mappings) = mapAndUnzip (build_mapping ncgImpl) tops
    mapping = foldr plusUFM emptyUFM mappings
build_mapping :: NcgImpl statics instr jumpDest
              -> GenCmmDecl d t (ListGraph instr)
              -> (GenCmmDecl d t (ListGraph instr), UniqFM jumpDest)
build_mapping _ top@(CmmData _ _) = (top, emptyUFM)
build_mapping _ (CmmProc info lbl (ListGraph []))
  = (CmmProc info lbl (ListGraph []), emptyUFM)
build_mapping ncgImpl (CmmProc info lbl (ListGraph (head:blocks)))
  = (CmmProc info lbl (ListGraph (head:others)), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
  where
    -- find all the blocks that just consist of a jump that can be
    -- shorted.
    -- Don't completely eliminate loops here -- that can leave a dangling jump!
    (_, shortcut_blocks, others) = foldl split (emptyBlockSet, [], []) blocks
    split (s, shortcut_blocks, others) b@(BasicBlock id [insn])
        | Just jd <- canShortcut ncgImpl insn,
          Just dest <- getJumpDestBlockId ncgImpl jd,
          (setMember dest s) || dest == id -- loop checks
        = (s, shortcut_blocks, b : others)
    split (s, shortcut_blocks, others) (BasicBlock id [insn])
        | Just dest <- canShortcut ncgImpl insn
        = (setInsert id s, (id,dest) : shortcut_blocks, others)
    split (s, shortcut_blocks, others) other = (s, shortcut_blocks, other : others)

    -- build a mapping from BlockId to JumpDest for shorting branches
    mapping = foldl add emptyUFM shortcut_blocks
    add ufm (id,dest) = addToUFM ufm id dest
apply_mapping :: NcgImpl statics instr jumpDest
              -> UniqFM jumpDest
              -> GenCmmDecl statics h (ListGraph instr)
              -> GenCmmDecl statics h (ListGraph instr)
apply_mapping ncgImpl ufm (CmmData sec statics)
  = CmmData sec (shortcutStatics ncgImpl (lookupUFM ufm) statics)
apply_mapping ncgImpl ufm (CmmProc info lbl (ListGraph blocks))
  = CmmProc info lbl (ListGraph $ map short_bb blocks)
  where
    short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
    short_insn i = shortcutJump ncgImpl (lookupUFM ufm) i
                 -- shortcutJump should apply the mapping repeatedly,
                 -- just in case we can short multiple branches.
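                 -- For example, if block B is just @jmp C@ and block C
                 -- is just @jmp D@, the mapping contains both B :-> C
                 -- and C :-> D, so repeated application rewrites an
                 -- original @jmp B@ all the way to @jmp D@.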
-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of stix code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.
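
-- As a schematic x86 example: once the selector has emitted
-- @subl $8,%esp@ the simulated delta is -8, so a spill slot at offset N
-- from the %esp at block entry must now be addressed at N - delta
-- (that is, N + 8) bytes from the current %esp.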
-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?

genMachCode
        :: DynFlags
        -> (RawCmmDecl -> NatM [NatCmmDecl statics instr])
        -> RawCmmDecl
        -> UniqSM
                ( [NatCmmDecl statics instr]
                , [CLabel])

genMachCode dflags cmmTopCodeGen cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if   final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)
        }
-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

Here we do:

  (a) Constant folding
  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted.
  (c) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols
  (d) Some arch-specific optimizations

(a) and (b) will be moving to the new Hoopl pipeline; however, (c) and
(d) are only needed by the native backend and will continue to live
here.

Ideas for other things we could do (put these in Hoopl please!):

  - shortcut jumps-to-jumps
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
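
As a tiny (schematic) example of (b), a sequence such as

    x = R1 + 1;       // x used exactly once, just below
    I32[Hp] = x;

can be shorted to

    I32[Hp] = R1 + 1;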
cmmToCmm :: DynFlags -> RawCmmDecl -> (RawCmmDecl, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl (ListGraph blocks)) = runCmmOpt dflags $ do
  let platform = targetPlatform dflags
  blocks' <- mapM cmmBlockConFold (cmmMiniInline platform (cmmEliminateDeadBlocks blocks))
  return $ CmmProc info lbl (ListGraph blocks')

newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))

instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x,imports #)
  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
        case f (imports, dflags) of
          (# x, imports' #) ->
            case g x of
              CmmOptM g' -> g' (imports', dflags)

addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, _dflags) -> (# (), lbl:imports #)

instance HasDynFlags CmmOptM where
    getDynFlags = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                        (# result, imports #) -> (result, imports)

cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'
-- This does three optimizations, but they're very quick to check, so we don't
-- bother turning them off even when the Hoopl code is active.  Since
-- this is on the old Cmm representation, we can't reuse the code either:
--  * reg = reg      --> nop
--  * if 0 then jump --> nop
--  * if 1 then jump --> jump
-- We might be tempted to skip this step entirely if not opt_PIC, but
-- there is some PowerPC code for the non-PIC case, which would also
-- have to be separated.
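-- Schematically:
--      R1 = R1;            becomes  a CmmNop
--      if (0) goto L;      becomes  a CmmComment recording the deletion
--      if (17) goto L;     becomes  goto L;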
cmmStmtConFold :: CmmStmt -> CmmOptM CmmStmt
cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                   CmmReg reg' | reg == reg' -> CmmNop
                   new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr'

        CmmCall target regs args returns
           -> do target' <- case target of
                              CmmCallee e conv -> do
                                e' <- cmmExprConFold CallReference e
                                return $ CmmCallee e' conv
                              other -> return other
                 args' <- mapM (\(CmmHinted arg hint) -> do
                                  arg' <- cmmExprConFold DataReference arg
                                  return (CmmHinted arg' hint)) args
                 return $ CmmCall target' regs args' returns

        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 dflags <- getDynFlags
                 let platform = targetPlatform dflags
                 return $ case test' of
                   CmmLit (CmmInt 0 _) ->
                     CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt platform stmt)))

                   CmmLit (CmmInt _ _) -> CmmBranch dest
                   _other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids

        other
           -> return other
cmmExprConFold :: ReferenceKind -> CmmExpr -> CmmOptM CmmExpr
cmmExprConFold referenceKind expr = do
    dflags <- getDynFlags
    -- Skip constant folding if new code generator is running
    -- (this optimization is done in Hoopl)
    let expr' = if dopt Opt_TryNewCodeGen dflags
                    then expr
                    else cmmExprCon (targetPlatform dflags) expr
    cmmExprNative referenceKind expr'

cmmExprCon :: Platform -> CmmExpr -> CmmExpr
cmmExprCon platform (CmmLoad addr rep) = CmmLoad (cmmExprCon platform addr) rep
cmmExprCon platform (CmmMachOp mop args)
    = cmmMachOpFold platform mop (map (cmmExprCon platform) args)
cmmExprCon _ other = other
-- handles both PIC and non-PIC cases... a very strange mixture
-- of things to do.
cmmExprNative :: ReferenceKind -> CmmExpr -> CmmOptM CmmExpr
cmmExprNative referenceKind expr = do
     dflags <- getDynFlags
     let platform = targetPlatform dflags
         arch = platformArch platform
     case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprNative DataReference addr
                 return $ CmmLoad addr' rep

        CmmMachOp mop args
           -> do args' <- mapM (cmmExprNative DataReference) args
                 return $ CmmMachOp mop args'

        CmmLit (CmmLabel lbl)
           -> do
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
        CmmLit (CmmLabelOff lbl off)
           -> do
                dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                -- need to optimize here, since it's late
                return $ cmmMachOpFold platform (MO_Add wordWidth) [
                    dynRef,
                    (CmmLit $ CmmInt (fromIntegral off) wordWidth)
                  ]
        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal EagerBlackholeInfo)
          | arch == ArchPPC && not opt_PIC
          -> cmmExprNative referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_EAGER_BLACKHOLE_info")))
        CmmReg (CmmGlobal GCEnter1)
          | arch == ArchPPC && not opt_PIC
          -> cmmExprNative referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_enter_1")))
        CmmReg (CmmGlobal GCFun)
          | arch == ArchPPC && not opt_PIC
          -> cmmExprNative referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_fun")))