-----------------------------------------------------------------------------
--
-- Cmm optimisation
--
-- (c) The University of Glasgow 2006
--
-----------------------------------------------------------------------------

module CmmOpt (
        cmmMachOpFold,
        cmmMachOpFoldM,
        cmmLoopifyForC,
  ) where

#include "HsVersions.h"

import CmmUtils
import OldCmm
import DynFlags
import CLabel

import FastTypes
import Outputable
import Platform

import Data.Bits
import Data.Maybe

-- -----------------------------------------------------------------------------
-- MachOp constant folder

-- Now, try to constant-fold the MachOps.  The arguments have already
-- been optimized and folded.

cmmMachOpFold
    :: DynFlags
    -> MachOp       -- The operation from a CmmMachOp
    -> [CmmExpr]    -- The optimized arguments
    -> CmmExpr

cmmMachOpFold dflags op args = fromMaybe (CmmMachOp op args) (cmmMachOpFoldM dflags op args)

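-- For illustration (not part of the original code): folding a constant
-- addition at an assumed width W32,
--
--   cmmMachOpFold dflags (MO_Add W32)
--                 [CmmLit (CmmInt 1 W32), CmmLit (CmmInt 2 W32)]
--
-- should reduce to CmmLit (CmmInt 3 W32) via the literal/literal case below.
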
-- Returns Nothing if no changes were made; this is useful for Hoopl and
-- also reduces allocation!
cmmMachOpFoldM
    :: DynFlags
    -> MachOp
    -> [CmmExpr]
    -> Maybe CmmExpr

cmmMachOpFoldM _ op [CmmLit (CmmInt x rep)]
  = Just $ case op of
      MO_S_Neg _ -> CmmLit (CmmInt (-x) rep)
      MO_Not _   -> CmmLit (CmmInt (complement x) rep)

        -- these are interesting: we must first narrow to the
        -- "from" type, in order to truncate to the correct size.
        -- The final narrow/widen to the destination type
        -- is implicit in the CmmLit.
      MO_SF_Conv _from to -> CmmLit (CmmFloat (fromInteger x) to)
      MO_SS_Conv from to  -> CmmLit (CmmInt (narrowS from x) to)
      MO_UU_Conv from to  -> CmmLit (CmmInt (narrowU from x) to)

      _ -> panic "cmmMachOpFoldM: unknown unary op"


-- Eliminate conversion NOPs
cmmMachOpFoldM _ (MO_SS_Conv rep1 rep2) [x] | rep1 == rep2 = Just x
cmmMachOpFoldM _ (MO_UU_Conv rep1 rep2) [x] | rep1 == rep2 = Just x

-- Eliminate nested conversions where possible
cmmMachOpFoldM dflags conv_outer [CmmMachOp conv_inner [x]]
  | Just (rep1,rep2,signed1) <- isIntConversion conv_inner,
    Just (_,   rep3,signed2) <- isIntConversion conv_outer
  = case () of
      -- widen then narrow to the same size is a nop
      _ | rep1 < rep2 && rep1 == rep3 -> Just x
      -- Widen then narrow to different size: collapse to single conversion
      -- but remember to use the signedness from the widening, just in case
      -- the final conversion is a widen.
        | rep1 < rep2 && rep2 > rep3 ->
            Just $ cmmMachOpFold dflags (intconv signed1 rep1 rep3) [x]
      -- Nested widenings: collapse if the signedness is the same
        | rep1 < rep2 && rep2 < rep3 && signed1 == signed2 ->
            Just $ cmmMachOpFold dflags (intconv signed1 rep1 rep3) [x]
      -- Nested narrowings: collapse
        | rep1 > rep2 && rep2 > rep3 ->
            Just $ cmmMachOpFold dflags (MO_UU_Conv rep1 rep3) [x]
        | otherwise ->
            Nothing
  where
    isIntConversion (MO_UU_Conv rep1 rep2)
      = Just (rep1,rep2,False)
    isIntConversion (MO_SS_Conv rep1 rep2)
      = Just (rep1,rep2,True)
    isIntConversion _ = Nothing

    intconv True  = MO_SS_Conv
    intconv False = MO_UU_Conv

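-- A sketch of the effect (not part of the original code): a widen followed
-- by a narrow back to the original width disappears, e.g.
--
--   CmmMachOp (MO_UU_Conv W16 W8) [CmmMachOp (MO_UU_Conv W8 W16) [x]]  ==>  x
--
-- and two narrowings, say W32->W16 then W16->W8, collapse into a single
-- MO_UU_Conv W32 W8.
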
-- ToDo: a narrow of a load can be collapsed into a narrow load, right?
-- but what if the architecture only supports word-sized loads, should
-- we do the transformation anyway?

cmmMachOpFoldM dflags mop [CmmLit (CmmInt x xrep), CmmLit (CmmInt y _)]
  = case mop of
        -- for comparisons: don't forget to narrow the arguments before
        -- comparing, since they might be out of range.
        MO_Eq _   -> Just $ CmmLit (CmmInt (if x_u == y_u then 1 else 0) (wordWidth dflags))
        MO_Ne _   -> Just $ CmmLit (CmmInt (if x_u /= y_u then 1 else 0) (wordWidth dflags))

        MO_U_Gt _ -> Just $ CmmLit (CmmInt (if x_u >  y_u then 1 else 0) (wordWidth dflags))
        MO_U_Ge _ -> Just $ CmmLit (CmmInt (if x_u >= y_u then 1 else 0) (wordWidth dflags))
        MO_U_Lt _ -> Just $ CmmLit (CmmInt (if x_u <  y_u then 1 else 0) (wordWidth dflags))
        MO_U_Le _ -> Just $ CmmLit (CmmInt (if x_u <= y_u then 1 else 0) (wordWidth dflags))

        MO_S_Gt _ -> Just $ CmmLit (CmmInt (if x_s >  y_s then 1 else 0) (wordWidth dflags))
        MO_S_Ge _ -> Just $ CmmLit (CmmInt (if x_s >= y_s then 1 else 0) (wordWidth dflags))
        MO_S_Lt _ -> Just $ CmmLit (CmmInt (if x_s <  y_s then 1 else 0) (wordWidth dflags))
        MO_S_Le _ -> Just $ CmmLit (CmmInt (if x_s <= y_s then 1 else 0) (wordWidth dflags))

        MO_Add r -> Just $ CmmLit (CmmInt (x + y) r)
        MO_Sub r -> Just $ CmmLit (CmmInt (x - y) r)
        MO_Mul r -> Just $ CmmLit (CmmInt (x * y) r)
        MO_U_Quot r | y /= 0 -> Just $ CmmLit (CmmInt (x_u `quot` y_u) r)
        MO_U_Rem  r | y /= 0 -> Just $ CmmLit (CmmInt (x_u `rem`  y_u) r)
        MO_S_Quot r | y /= 0 -> Just $ CmmLit (CmmInt (x `quot` y) r)
        MO_S_Rem  r | y /= 0 -> Just $ CmmLit (CmmInt (x `rem`  y) r)

        MO_And   r -> Just $ CmmLit (CmmInt (x .&. y) r)
        MO_Or    r -> Just $ CmmLit (CmmInt (x .|. y) r)
        MO_Xor   r -> Just $ CmmLit (CmmInt (x `xor` y) r)

        MO_Shl   r -> Just $ CmmLit (CmmInt (x `shiftL` fromIntegral y) r)
        MO_U_Shr r -> Just $ CmmLit (CmmInt (x_u `shiftR` fromIntegral y) r)
        MO_S_Shr r -> Just $ CmmLit (CmmInt (x `shiftR` fromIntegral y) r)

        _          -> Nothing

  where
    x_u = narrowU xrep x
    y_u = narrowU xrep y
    x_s = narrowS xrep x
    y_s = narrowS xrep y


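-- Note, for illustration only (not in the original source): because both
-- operands are narrowed first, an equality test at W8 on the unnormalised
-- literals 256 and 0 would fold to "equal", since narrowU W8 256 == 0.
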
-- When possible, shift the constants to the right-hand side, so that we
-- can match for strength reductions.  Note that the code generator will
-- also assume that constants have been shifted to the right when
-- possible.

cmmMachOpFoldM dflags op [x@(CmmLit _), y]
  | not (isLit y) && isCommutableMachOp op
  = Just (cmmMachOpFold dflags op [y, x])

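-- A sketch of the effect (not part of the original code): for a commutable
-- op such as MO_Add, an expression like
--   CmmMachOp (MO_Add w) [CmmLit l, CmmReg r]
-- is re-folded as
--   CmmMachOp (MO_Add w) [CmmReg r, CmmLit l]
-- so that later rules only have to look for the literal on the right.
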
-- Turn (a+b)+c into a+(b+c) where possible.  Because literals are
-- moved to the right, it is more likely that we will find
-- opportunities for constant folding when the expression is
-- right-associated.
--
-- ToDo: this appears to introduce a quadratic behaviour due to the
-- nested cmmMachOpFold.  Can we fix this?
--
-- Why do we check isLit arg1?  If arg1 is a lit, it means that arg2
-- is also a lit (otherwise arg1 would be on the right).  If we
-- put arg1 on the left of the rearranged expression, we'll get into a
-- loop:  (x1+x2)+x3 => x1+(x2+x3)  => (x2+x3)+x1 => x2+(x3+x1) ...
--
-- Also don't do it if arg1 is PicBaseReg, so that we don't separate the
-- PicBaseReg from the corresponding label (or label difference).
--
cmmMachOpFoldM dflags mop1 [CmmMachOp mop2 [arg1,arg2], arg3]
  | mop2 `associates_with` mop1
    && not (isLit arg1) && not (isPicReg arg1)
  = Just (cmmMachOpFold dflags mop2 [arg1, cmmMachOpFold dflags mop1 [arg2,arg3]])
  where
    MO_Add{} `associates_with` MO_Sub{} = True
    mop1 `associates_with` mop2 =
       mop1 == mop2 && isAssociativeMachOp mop1

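-- For illustration (not part of the original code): with literals kept on
-- the right, an expression shaped like  (x + 1) + 2  is re-associated to
-- x + (1 + 2); the inner addition then folds to a literal, so the whole
-- expression should end up as  x + 3.
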
-- special case: (a - b) + c  ==>  a + (c - b)
cmmMachOpFoldM dflags mop1@(MO_Add{}) [CmmMachOp mop2@(MO_Sub{}) [arg1,arg2], arg3]
  | not (isLit arg1) && not (isPicReg arg1)
  = Just (cmmMachOpFold dflags mop1 [arg1, cmmMachOpFold dflags mop2 [arg3,arg2]])

-- Make a RegOff if we can
cmmMachOpFoldM _ (MO_Add _) [CmmReg reg, CmmLit (CmmInt n rep)]
  = Just $ cmmRegOff reg (fromIntegral (narrowS rep n))
cmmMachOpFoldM _ (MO_Add _) [CmmRegOff reg off, CmmLit (CmmInt n rep)]
  = Just $ cmmRegOff reg (off + fromIntegral (narrowS rep n))
cmmMachOpFoldM _ (MO_Sub _) [CmmReg reg, CmmLit (CmmInt n rep)]
  = Just $ cmmRegOff reg (- fromIntegral (narrowS rep n))
cmmMachOpFoldM _ (MO_Sub _) [CmmRegOff reg off, CmmLit (CmmInt n rep)]
  = Just $ cmmRegOff reg (off - fromIntegral (narrowS rep n))

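-- A sketch of the effect (not in the original source): adding a literal to
-- a register, e.g.
--   CmmMachOp (MO_Add w) [CmmReg r, CmmLit (CmmInt 8 w)]
-- becomes the more compact addressing form  CmmRegOff r 8  via cmmRegOff.
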
-- Fold label(+/-)offset into a CmmLit where possible

cmmMachOpFoldM _ (MO_Add _) [CmmLit lit, CmmLit (CmmInt i rep)]
  = Just $ CmmLit (cmmOffsetLit lit (fromIntegral (narrowU rep i)))
cmmMachOpFoldM _ (MO_Add _) [CmmLit (CmmInt i rep), CmmLit lit]
  = Just $ CmmLit (cmmOffsetLit lit (fromIntegral (narrowU rep i)))
cmmMachOpFoldM _ (MO_Sub _) [CmmLit lit, CmmLit (CmmInt i rep)]
  = Just $ CmmLit (cmmOffsetLit lit (fromIntegral (negate (narrowU rep i))))


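-- For illustration (not part of the original code): assuming cmmOffsetLit
-- behaves as in CmmUtils, adding a literal 8 to a CmmLabel lbl should fold
-- to CmmLit (CmmLabelOff lbl 8) instead of leaving a run-time addition.
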
-- Comparison of literal with widened operand: perform the comparison
-- at the smaller width, as long as the literal is within range.

-- We can't do the reverse trick when the operand is narrowed:
-- narrowing throws away bits from the operand, and there is no way to do
-- the same comparison at the larger size.

cmmMachOpFoldM dflags cmp [CmmMachOp conv [x], CmmLit (CmmInt i _)]
  |     -- PowerPC NCG has a TODO for I8/I16 comparisons, so don't try
    platformArch (targetPlatform dflags) `elem` [ArchX86, ArchX86_64],
        -- if the operand is widened:
    Just (rep, signed, narrow_fn) <- maybe_conversion conv,
        -- and this is a comparison operation:
    Just narrow_cmp <- maybe_comparison cmp rep signed,
        -- and the literal fits in the smaller size:
    i == narrow_fn rep i
        -- then we can do the comparison at the smaller size
  = Just (cmmMachOpFold dflags narrow_cmp [x, CmmLit (CmmInt i rep)])
  where
    maybe_conversion (MO_UU_Conv from to)
        | to > from
        = Just (from, False, narrowU)
    maybe_conversion (MO_SS_Conv from to)
        | to > from
        = Just (from, True, narrowS)

        -- don't attempt to apply this optimisation when the source
        -- is a float; see #1916
    maybe_conversion _ = Nothing

        -- careful (#2080): if the original comparison was signed, but
        -- we were doing an unsigned widen, then we must do an
        -- unsigned comparison at the smaller size.
    maybe_comparison (MO_U_Gt _) rep _     = Just (MO_U_Gt rep)
    maybe_comparison (MO_U_Ge _) rep _     = Just (MO_U_Ge rep)
    maybe_comparison (MO_U_Lt _) rep _     = Just (MO_U_Lt rep)
    maybe_comparison (MO_U_Le _) rep _     = Just (MO_U_Le rep)
    maybe_comparison (MO_Eq   _) rep _     = Just (MO_Eq   rep)
    maybe_comparison (MO_S_Gt _) rep True  = Just (MO_S_Gt rep)
    maybe_comparison (MO_S_Ge _) rep True  = Just (MO_S_Ge rep)
    maybe_comparison (MO_S_Lt _) rep True  = Just (MO_S_Lt rep)
    maybe_comparison (MO_S_Le _) rep True  = Just (MO_S_Le rep)
    maybe_comparison (MO_S_Gt _) rep False = Just (MO_U_Gt rep)
    maybe_comparison (MO_S_Ge _) rep False = Just (MO_U_Ge rep)
    maybe_comparison (MO_S_Lt _) rep False = Just (MO_U_Lt rep)
    maybe_comparison (MO_S_Le _) rep False = Just (MO_U_Le rep)
    maybe_comparison _ _ _ = Nothing

-- We can often do something with constants of 0 and 1 ...

cmmMachOpFoldM dflags mop [x, y@(CmmLit (CmmInt 0 _))]
  = case mop of
        MO_Add   _ -> Just x
        MO_Sub   _ -> Just x
        MO_Mul   _ -> Just y
        MO_And   _ -> Just y
        MO_Or    _ -> Just x
        MO_Xor   _ -> Just x
        MO_Shl   _ -> Just x
        MO_S_Shr _ -> Just x
        MO_U_Shr _ -> Just x
        MO_Ne _ | isComparisonExpr x -> Just x
        MO_Eq _ | Just x' <- maybeInvertCmmExpr x -> Just x'
        MO_U_Gt _ | isComparisonExpr x -> Just x
        MO_S_Gt _ | isComparisonExpr x -> Just x
        MO_U_Lt _ | isComparisonExpr x -> Just $ CmmLit (CmmInt 0 (wordWidth dflags))
        MO_S_Lt _ | isComparisonExpr x -> Just $ CmmLit (CmmInt 0 (wordWidth dflags))
        MO_U_Ge _ | isComparisonExpr x -> Just $ CmmLit (CmmInt 1 (wordWidth dflags))
        MO_S_Ge _ | isComparisonExpr x -> Just $ CmmLit (CmmInt 1 (wordWidth dflags))
        MO_U_Le _ | Just x' <- maybeInvertCmmExpr x -> Just x'
        MO_S_Le _ | Just x' <- maybeInvertCmmExpr x -> Just x'
        _ -> Nothing

cmmMachOpFoldM dflags mop [x, (CmmLit (CmmInt 1 rep))]
  = case mop of
        MO_Mul    _ -> Just x
        MO_S_Quot _ -> Just x
        MO_U_Quot _ -> Just x
        MO_S_Rem  _ -> Just $ CmmLit (CmmInt 0 rep)
        MO_U_Rem  _ -> Just $ CmmLit (CmmInt 0 rep)
        MO_Ne _ | Just x' <- maybeInvertCmmExpr x -> Just x'
        MO_Eq _ | isComparisonExpr x -> Just x
        MO_U_Lt _ | Just x' <- maybeInvertCmmExpr x -> Just x'
        MO_S_Lt _ | Just x' <- maybeInvertCmmExpr x -> Just x'
        MO_U_Gt _ | isComparisonExpr x -> Just $ CmmLit (CmmInt 0 (wordWidth dflags))
        MO_S_Gt _ | isComparisonExpr x -> Just $ CmmLit (CmmInt 0 (wordWidth dflags))
        MO_U_Le _ | isComparisonExpr x -> Just $ CmmLit (CmmInt 1 (wordWidth dflags))
        MO_S_Le _ | isComparisonExpr x -> Just $ CmmLit (CmmInt 1 (wordWidth dflags))
        MO_U_Ge _ | isComparisonExpr x -> Just x
        MO_S_Ge _ | isComparisonExpr x -> Just x
        _ -> Nothing

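-- For illustration (not part of the original code): these identities mean
-- that, for instance,  x + 0  and  x * 1  both fold straight back to  x,
-- while  x `rem` 1  folds to the literal 0 at the appropriate width.
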
-- Now look for multiplication/division by powers of 2 (integers).

cmmMachOpFoldM dflags mop [x, (CmmLit (CmmInt n _))]
  = case mop of
        MO_Mul rep
           | Just p <- exactLog2 n ->
                 Just (cmmMachOpFold dflags (MO_Shl rep) [x, CmmLit (CmmInt p rep)])
        MO_U_Quot rep
           | Just p <- exactLog2 n ->
                 Just (cmmMachOpFold dflags (MO_U_Shr rep) [x, CmmLit (CmmInt p rep)])
        MO_S_Quot rep
           | Just p <- exactLog2 n,
             CmmReg _ <- x ->   -- We duplicate x below, hence require
                                -- it is a reg.  FIXME: remove this restriction.
                -- shift right is not the same as quot, because it rounds
                -- to minus infinity, whereas quot rounds toward zero.
                -- To fix this up, we add one less than the divisor to the
                -- dividend if it is a negative number.
                --
                -- to avoid a test/jump, we use the following sequence:
                --      x1 = x >> word_size-1  (all 1s if -ve, all 0s if +ve)
                --      x2 = x1 & (divisor-1)
                --      result = (x+x2) >>= log2(divisor)
                -- this could be done a bit more simply using conditional moves,
                -- but we're processor independent here.
                --
                -- we optimise the divide by 2 case slightly, generating
                --      x1 = x >> word_size-1  (unsigned)
                --      result = (x + x1) >>= log2(divisor)
                let
                    bits = fromIntegral (widthInBits rep) - 1
                    shr = if p == 1 then MO_U_Shr rep else MO_S_Shr rep
                    x1 = CmmMachOp shr [x, CmmLit (CmmInt bits rep)]
                    x2 = if p == 1 then x1 else
                         CmmMachOp (MO_And rep) [x1, CmmLit (CmmInt (n-1) rep)]
                    x3 = CmmMachOp (MO_Add rep) [x, x2]
                in
                Just (cmmMachOpFold dflags (MO_S_Shr rep) [x3, CmmLit (CmmInt p rep)])
        _ -> Nothing

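-- A sketch of the generated code (not in the original source): for a
-- register r, a signed  r `quot` 8  should expand to roughly
--
--     x1 = r >> (word_size-1)      -- all 0s or all 1s (the sign)
--     x2 = x1 & 7                  -- divisor-1
--     result = (r + x2) >> 3       -- arithmetic shift by log2 8
--
-- while  r * 8  and the unsigned  r `quot` 8  become plain shifts.
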
-- ToDo (#7116): optimise floating-point multiplication, e.g. x*2.0 -> x+x
-- Unfortunately this needs a unique supply because x might not be a
-- register.  See #2253 (program 6) for an example.


-- Anything else is just too hard.

cmmMachOpFoldM _ _ _ = Nothing

-- -----------------------------------------------------------------------------
-- exactLog2

-- This algorithm for determining the $\log_2$ of exact powers of 2 comes
-- from GCC.  It requires bit manipulation primitives, and we use GHC
-- extensions.  Tough.
--
-- Used to be in MachInstrs --SDM.
-- ToDo: remove use of unboxery --SDM.

-- Unboxery removed in favor of FastInt; but is the function supposed to fail
-- on inputs >= 2147483648, or was that just an implementation artifact?
-- And is this speed-critical, or can we just use Integer operations
-- (including Data.Bits)?
--  --Isaac Dupree

exactLog2 :: Integer -> Maybe Integer
exactLog2 x_
  = if (x_ <= 0 || x_ >= 2147483648) then
       Nothing
    else
       case iUnbox (fromInteger x_) of { x ->
       if (x `bitAndFastInt` negateFastInt x) /=# x then
          Nothing
       else
          Just (toInteger (iBox (pow2 x)))
       }
  where
    pow2 x | x ==# _ILIT(1) = _ILIT(0)
           | otherwise = _ILIT(1) +# pow2 (x `shiftR_FastInt` _ILIT(1))


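-- For illustration (not part of the original code): exactLog2 8 gives
-- Just 3 and exactLog2 1 gives Just 0, while non-powers of two (or anything
-- outside the range 1..2147483647) give Nothing, e.g. exactLog2 6 == Nothing.
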
-- -----------------------------------------------------------------------------
-- Loopify for C

{-
 This is a simple pass that replaces tail-recursive functions like this:

   fac() {
     ...
     jump fac();
   }

 with this:

   fac() {
   L:
     ...
     goto L;
   }

 the latter generates better C code, because the C compiler treats it
 like a loop, and brings full loop optimisation to bear.

 In my measurements this makes little or no difference to anything
 except factorial, but what the hell.
-}

cmmLoopifyForC :: DynFlags -> RawCmmDecl -> RawCmmDecl
-- XXX: revisit if we actually want to do this
-- cmmLoopifyForC p@(CmmProc Nothing _ _) = p  -- only if there's an info table, ignore case alts
cmmLoopifyForC dflags (CmmProc infos entry_lbl
                 (ListGraph blocks@(BasicBlock top_id _ : _))) =
--  pprTrace "jump_lbl" (ppr jump_lbl <+> ppr entry_lbl) $
  CmmProc infos entry_lbl (ListGraph blocks')
  where blocks' = [ BasicBlock id (map do_stmt stmts)
                  | BasicBlock id stmts <- blocks ]

        do_stmt (CmmJump (CmmLit (CmmLabel lbl)) _) | lbl == jump_lbl
          = CmmBranch top_id
        do_stmt stmt = stmt

        jump_lbl | tablesNextToCode dflags = toInfoLbl entry_lbl
                 | otherwise               = entry_lbl

cmmLoopifyForC _ top = top

-- -----------------------------------------------------------------------------
-- Utils

isLit :: CmmExpr -> Bool
isLit (CmmLit _) = True
isLit _          = False

isComparisonExpr :: CmmExpr -> Bool
isComparisonExpr (CmmMachOp op _) = isComparisonMachOp op
isComparisonExpr _                = False

isPicReg :: CmmExpr -> Bool
isPicReg (CmmReg (CmmGlobal PicBaseReg)) = True
isPicReg _                               = False
