Reduce default for -fmax-pmcheck-iterations from 1e7 to 2e6
[ghc.git] / compiler / prelude / PrimOp.hs
1 {-
2 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
3
4 \section[PrimOp]{Primitive operations (machine-level)}
5 -}
6
7 {-# LANGUAGE CPP #-}
8
9 -- The default is a bit too low for the quite large primOpInfo definition
10 #if __GLASGOW_HASKELL__ >= 801
11 {-# OPTIONS_GHC -fmax-pmcheck-iterations=10000000 #-}
12 #endif
13
14 module PrimOp (
15 PrimOp(..), PrimOpVecCat(..), allThePrimOps,
16 primOpType, primOpSig,
17 primOpTag, maxPrimOpTag, primOpOcc,
18
19 tagToEnumKey,
20
21 primOpOutOfLine, primOpCodeSize,
22 primOpOkForSpeculation, primOpOkForSideEffects,
23 primOpIsCheap, primOpFixity,
24
25 getPrimOpResultInfo, PrimOpResultInfo(..),
26
27 PrimCall(..)
28 ) where
29
30 #include "HsVersions.h"
31
32 import TysPrim
33 import TysWiredIn
34
35 import CmmType
36 import Demand
37 import OccName ( OccName, pprOccName, mkVarOccFS )
38 import TyCon ( TyCon, isPrimTyCon, PrimRep(..) )
39 import Type
40 import BasicTypes ( Arity, Fixity(..), FixityDirection(..), Boxity(..) )
41 import ForeignCall ( CLabelString )
42 import Unique ( Unique, mkPrimOpIdUnique )
43 import Outputable
44 import FastString
45 import Module ( UnitId )
46
47 {-
48 ************************************************************************
49 * *
50 \subsection[PrimOp-datatype]{Datatype for @PrimOp@ (an enumeration)}
51 * *
52 ************************************************************************
53
54 These are in \tr{state-interface.verb} order.
55 -}
56
57 -- supplies:
58 -- data PrimOp = ...
59 #include "primop-data-decl.hs-incl"
60
-- | primOpTag maps each PrimOp constructor to an Int tag.  The Eq and Ord
-- instances below compare primops via these tags, and tagToEnumKey builds a
-- Unique from one, so distinct primops must receive distinct tags.
-- The clauses come from the generated include file.
61 -- supplies
62 -- primOpTag :: PrimOp -> Int
63 #include "primop-tag.hs-incl"
-- Catch-all safety net: the generated clauses are expected to cover every
-- constructor, so this equation should be unreachable in practice.
64 primOpTag _ = error "primOpTag: unknown primop"
65
66
-- | Equality via primOpTag: two primops are equal iff their tags are equal.
67 instance Eq PrimOp where
68 op1 == op2 = primOpTag op1 == primOpTag op2
69
-- | Ordering via primOpTag.  All comparison methods (and compare itself) are
-- spelled out explicitly rather than derived, since PrimOp's constructors
-- live in a generated include file.
70 instance Ord PrimOp where
71 op1 < op2 = primOpTag op1 < primOpTag op2
72 op1 <= op2 = primOpTag op1 <= primOpTag op2
73 op1 >= op2 = primOpTag op1 >= primOpTag op2
74 op1 > op2 = primOpTag op1 > primOpTag op2
75 op1 `compare` op2 | op1 < op2 = LT
76 | op1 == op2 = EQ
77 | otherwise = GT
78
-- | Pretty-printing delegates to pprPrimOp (defined near the end of this
-- module), which prints the primop's OccName.
79 instance Outputable PrimOp where
80 ppr op = pprPrimOp op
81
-- | Category of a SIMD vector primop: integer, word, or floating-point
-- element type.  (Exported alongside PrimOp in the module header.)
82 data PrimOpVecCat = IntVec
83 | WordVec
84 | FloatVec
85
86 -- An @Enum@-derived list would be better; meanwhile... (ToDo)
87
-- | The complete list of primops, generated from the primop description file
-- into the include below.
88 allThePrimOps :: [PrimOp]
89 allThePrimOps =
90 #include "primop-list.hs-incl"
91
-- | The Unique for tagToEnum#, derived from its primop tag.  tagToEnum# is
-- special-cased elsewhere in the compiler, hence the exported key.
92 tagToEnumKey :: Unique
93 tagToEnumKey = mkPrimOpIdUnique (primOpTag TagToEnumOp)
94
95 {-
96 ************************************************************************
97 * *
98 \subsection[PrimOp-info]{The essential info about each @PrimOp@}
99 * *
100 ************************************************************************
101
102 The @String@ in the @PrimOpInfos@ is the ``base name'' by which the user may
103 refer to the primitive operation. The conventional \tr{#}-for-
104 unboxed ops is added on later.
105
106 The reason for the funny characters in the names is so we do not
107 interfere with the programmer's Haskell name spaces.
108
109 We use @PrimKinds@ for the ``type'' information, because they're
110 (slightly) more convenient to use than @TyCons@.
111 -}
112
-- | The essential description of one primop, from which its type is built:
--
--   * 'Dyadic'    — two arguments of type T, result T
--   * 'Monadic'   — one argument of type T, result T
--   * 'Compare'   — two arguments of type T, result Int#
--   * 'GenPrimOp' — fully general: type variables, argument types, result
--
-- Each carries the primop's 'OccName' (its user-visible base name).
113 data PrimOpInfo
114 = Dyadic OccName -- string :: T -> T -> T
115 Type
116 | Monadic OccName -- string :: T -> T
117 Type
118 | Compare OccName -- string :: T -> T -> Int#
119 Type
120 | GenPrimOp OccName -- string :: \/a1..an . T1 -> .. -> Tk -> T
121 [TyVar]
122 [Type]
123 Type
124
-- | Smart constructors for the simple PrimOpInfo shapes; they wrap the raw
-- name string into an OccName with mkVarOccFS.
125 mkDyadic, mkMonadic, mkCompare :: FastString -> Type -> PrimOpInfo
126 mkDyadic str ty = Dyadic (mkVarOccFS str) ty
127 mkMonadic str ty = Monadic (mkVarOccFS str) ty
128 mkCompare str ty = Compare (mkVarOccFS str) ty
129
-- | Smart constructor for the fully general PrimOpInfo shape: quantified
-- type variables, argument types, and result type.
130 mkGenPrimOp :: FastString -> [TyVar] -> [Type] -> Type -> PrimOpInfo
131 mkGenPrimOp str tvs tys ty = GenPrimOp (mkVarOccFS str) tvs tys ty
132
133 {-
134 ************************************************************************
135 * *
136 \subsubsection{Strictness}
137 * *
138 ************************************************************************
139
140 Not all primops are strict!
141 -}
142
-- | Strictness signature of a primop, given its arity.  Used via primOpSig;
-- the clauses come from the generated include file.
143 primOpStrictness :: PrimOp -> Arity -> StrictSig
144 -- See Demand.StrictnessInfo for discussion of what the results mean.
145 -- The arity should be the arity of the primop; that's why
146 -- this function isn't exported.
147 #include "primop-strictness.hs-incl"
148
149 {-
150 ************************************************************************
151 * *
152 \subsubsection{Fixity}
153 * *
154 ************************************************************************
155 -}
156
-- | Fixity of a primop when used as an operator, if it has one (e.g. the
-- comparison and arithmetic symbols).  Clauses come from the generated
-- include file.
157 primOpFixity :: PrimOp -> Maybe Fixity
158 #include "primop-fixity.hs-incl"
159
160 {-
161 ************************************************************************
162 * *
163 \subsubsection[PrimOp-comparison]{PrimOpInfo basic comparison ops}
164 * *
165 ************************************************************************
166
167 @primOpInfo@ gives all essential information (from which everything
168 else, notably a type, can be constructed) for each @PrimOp@.
169 -}
170
-- | All essential information about each primop (name, shape, types), from
-- which everything else — notably primOpType and primOpSig — is constructed.
-- The clauses come from the generated include file.
171 primOpInfo :: PrimOp -> PrimOpInfo
172 #include "primop-primop-info.hs-incl"
-- Catch-all safety net: the generated clauses should cover every primop.
173 primOpInfo _ = error "primOpInfo: unknown primop"
174
175 {-
176 Here are a load of comments from the old primOp info:
177
178 A @Word#@ is an unsigned @Int#@.
179
180 @decodeFloat#@ is given w/ Integer-stuff (it's similar).
181
182 @decodeDouble#@ is given w/ Integer-stuff (it's similar).
183
184 Decoding of floating-point numbers is sorta Integer-related. Encoding
185 is done with plain ccalls now (see PrelNumExtra.hs).
186
187 A @Weak@ Pointer is created by the @mkWeak#@ primitive:
188
189 mkWeak# :: k -> v -> f -> State# RealWorld
190 -> (# State# RealWorld, Weak# v #)
191
192 In practice, you'll use the higher-level
193
194 data Weak v = Weak# v
195 mkWeak :: k -> v -> IO () -> IO (Weak v)
196
197 The following operation dereferences a weak pointer. The weak pointer
198 may have been finalized, so the operation returns a result code which
199 must be inspected before looking at the dereferenced value.
200
201 deRefWeak# :: Weak# v -> State# RealWorld ->
202 (# State# RealWorld, v, Int# #)
203
204 Only look at v if the Int# returned is /= 0 !!
205
206 The higher-level op is
207
208 deRefWeak :: Weak v -> IO (Maybe v)
209
210 Weak pointers can be finalized early by using the finalize# operation:
211
212 finalizeWeak# :: Weak# v -> State# RealWorld ->
213 (# State# RealWorld, Int#, IO () #)
214
215 The Int# returned is either
216
217 0 if the weak pointer has already been finalized, or it has no
218 finalizer (the third component is then invalid).
219
220 1 if the weak pointer is still alive, with the finalizer returned
221 as the third component.
222
223 A {\em stable name/pointer} is an index into a table of stable name
224 entries. Since the garbage collector is told about stable pointers,
225 it is safe to pass a stable pointer to external systems such as C
226 routines.
227
228 \begin{verbatim}
229 makeStablePtr# :: a -> State# RealWorld -> (# State# RealWorld, StablePtr# a #)
230 freeStablePtr :: StablePtr# a -> State# RealWorld -> State# RealWorld
231 deRefStablePtr# :: StablePtr# a -> State# RealWorld -> (# State# RealWorld, a #)
232 eqStablePtr# :: StablePtr# a -> StablePtr# a -> Int#
233 \end{verbatim}
234
235 It may seem a bit surprising that @makeStablePtr#@ is an @IO@
236 operation since it doesn't (directly) involve IO operations. The
237 reason is that if some optimisation pass decided to duplicate calls to
238 @makeStablePtr#@ and we only pass one of the stable pointers over, a
239 massive space leak can result. Putting it into the IO monad
240 prevents this. (Another reason for putting them in a monad is to
241 ensure correct sequencing wrt the side-effecting @freeStablePtr@
242 operation.)
243
244 An important property of stable pointers is that if you call
245 makeStablePtr# twice on the same object you get the same stable
246 pointer back.
247
248 Note that we can implement @freeStablePtr#@ using @_ccall_@ (and,
249 besides, it's not likely to be used from Haskell) so it's not a
250 primop.
251
252 Question: Why @RealWorld@ - won't any instance of @_ST@ do the job? [ADR]
253
254 Stable Names
255 ~~~~~~~~~~~~
256
257 A stable name is like a stable pointer, but with three important differences:
258
259 (a) You can't deRef one to get back to the original object.
260 (b) You can convert one to an Int.
261 (c) You don't need to 'freeStableName'
262
263 The existence of a stable name doesn't guarantee to keep the object it
264 points to alive (unlike a stable pointer), hence (a).
265
266 Invariants:
267
268 (a) makeStableName always returns the same value for a given
269 object (same as stable pointers).
270
271 (b) if two stable names are equal, it implies that the objects
272 from which they were created were the same.
273
274 (c) stableNameToInt always returns the same Int for a given
275 stable name.
276
277
278 These primops are pretty weird.
279
280 dataToTag# :: a -> Int (arg must be an evaluated data type)
281 tagToEnum# :: Int -> a (result type must be an enumerated type)
282
283 The constraints aren't currently checked by the front end, but the
284 code generator will fall over if they aren't satisfied.
285
286 ************************************************************************
287 * *
288 Which PrimOps are out-of-line
289 * *
290 ************************************************************************
291
292 Some PrimOps need to be called out-of-line because they either need to
293 perform a heap check or they block.
294 -}
295
-- | True for primops that must be called out-of-line, because they either
-- need to perform a heap check or may block (see the section comment above).
-- Clauses come from the generated include file.
296 primOpOutOfLine :: PrimOp -> Bool
297 #include "primop-out-of-line.hs-incl"
298
299 {-
300 ************************************************************************
301 * *
302 Failure and side effects
303 * *
304 ************************************************************************
305
306 Note [PrimOp can_fail and has_side_effects]
307 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
308 Both can_fail and has_side_effects mean that the primop has
309 some effect that is not captured entirely by its result value.
310
311 ---------- has_side_effects ---------------------
312 A primop "has_side_effects" if it has some *write* effect, visible
313 elsewhere
314 - writing to the world (I/O)
315 - writing to a mutable data structure (writeIORef)
316 - throwing a synchronous Haskell exception
317
318 Often such primops have a type like
319 State -> input -> (State, output)
320 so the state token guarantees ordering. In general we rely *only* on
321 data dependencies of the state token to enforce write-effect ordering
322
323 * NB1: if you inline unsafePerformIO, you may end up with
324 side-effecting ops whose 'state' output is discarded.
325 And programmers may do that by hand; see Trac #9390.
326 That is why we (conservatively) do not discard write-effecting
327 primops even if both their state and result is discarded.
328
329 * NB2: We consider primops, such as raiseIO#, that can raise a
330 (Haskell) synchronous exception to "have_side_effects" but not
331 "can_fail". We must be careful about not discarding such things;
332 see the paper "A semantics for imprecise exceptions".
333
334 * NB3: *Read* effects (like reading an IORef) don't count here,
335 because it doesn't matter if we don't do them, or do them more than
336 once. *Sequencing* is maintained by the data dependency of the state
337 token.
338
339 ---------- can_fail ----------------------------
340 A primop "can_fail" if it can fail with an *unchecked* exception on
341 some elements of its input domain. Main examples:
342 division (fails on zero denominator)
343 array indexing (fails if the index is out of bounds)
344
345 An "unchecked exception" is one that is an outright error, (not
346 turned into a Haskell exception,) such as seg-fault or
347 divide-by-zero error. Such can_fail primops are ALWAYS surrounded
348 with a test that checks for the bad cases, but we need to be
349 very careful about code motion that might move it out of
350 the scope of the test.
351
352 Note [Transformations affected by can_fail and has_side_effects]
353 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
354 The can_fail and has_side_effects properties have the following effect
355 on program transformations. Summary table is followed by details.
356
357 can_fail has_side_effects
358 Discard NO NO
359 Float in YES YES
360 Float out NO NO
361 Duplicate YES NO
362
363 * Discarding. case (a `op` b) of _ -> rhs ===> rhs
364 You should not discard a has_side_effects primop; e.g.
365 case (writeIntArray# a i v s) of (# _, _ #) -> True
366 Arguably you should be able to discard this, since the
367 returned state token is not used, but that relies on NEVER
368 inlining unsafePerformIO, and programmers sometimes write
369 this kind of stuff by hand (Trac #9390). So we (conservatively)
370 never discard a has_side_effects primop.
371
372 However, it's fine to discard a can_fail primop. For example
373 case (indexIntArray# a i) of _ -> True
374 We can discard indexIntArray#; it has can_fail, but not
375 has_side_effects; see Trac #5658 which was all about this.
376 Notice that indexIntArray# is (in a more general handling of
377 effects) read effect, but we don't care about that here, and
378 treat read effects as *not* has_side_effects.
379
380 Similarly (a `/#` b) can be discarded. It can seg-fault or
381 cause a hardware exception, but not a synchronous Haskell
382 exception.
383
384
385
386 Synchronous Haskell exceptions, e.g. from raiseIO#, are treated
387 as has_side_effects and hence are not discarded.
388
389 * Float in. You can float a can_fail or has_side_effects primop
390 *inwards*, but not inside a lambda (see Duplication below).
391
392 * Float out. You must not float a can_fail primop *outwards* lest
393 you escape the dynamic scope of the test. Example:
394 case d ># 0# of
395 True -> case x /# d of r -> r +# 1
396 False -> 0
397 Here we must not float the case outwards to give
398 case x/# d of r ->
399 case d ># 0# of
400 True -> r +# 1
401 False -> 0
402
403 Nor can you float out a has_side_effects primop. For example:
404 if blah then case writeMutVar# v True s0 of (# s1 #) -> s1
405 else s0
406 Notice that s0 is mentioned in both branches of the 'if', but
407 only one of these two will actually be consumed. But if we
408 float out to
409 case writeMutVar# v True s0 of (# s1 #) ->
410 if blah then s1 else s0
411 the writeMutVar will be performed in both branches, which is
412 utterly wrong.
413
414 * Duplication. You cannot duplicate a has_side_effect primop. You
415 might wonder how this can occur given the state token threading, but
416 just look at Control.Monad.ST.Lazy.Imp.strictToLazy! We get
417 something like this
418 p = case readMutVar# s v of
419 (# s', r #) -> (S# s', r)
420 s' = case p of (s', r) -> s'
421 r = case p of (s', r) -> r
422
423 (All these bindings are boxed.) If we inline p at its two call
424 sites, we get a catastrophe: because the read is performed once when
425 s' is demanded, and once when 'r' is demanded, which may be much
426 later. Utterly wrong. Trac #3207 is a real example of this happening.
427
428 However, it's fine to duplicate a can_fail primop. That is really
429 the only difference between can_fail and has_side_effects.
430
431 Note [Implementation: how can_fail/has_side_effects affect transformations]
432 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
433 How do we ensure that floating/duplication/discarding are done right
434 in the simplifier?
435
436 Two main predicates on primops test these flags:
437 primOpOkForSideEffects <=> not has_side_effects
438 primOpOkForSpeculation <=> not (has_side_effects || can_fail)
439
440 * The "no-float-out" thing is achieved by ensuring that we never
441 let-bind a can_fail or has_side_effects primop. The RHS of a
442 let-binding (which can float in and out freely) satisfies
443 exprOkForSpeculation; this is the let/app invariant. And
444 exprOkForSpeculation is false of can_fail and has_side_effects.
445
446 * So can_fail and has_side_effects primops will appear only as the
447 scrutinees of cases, and that's why the FloatIn pass is capable
448 of floating case bindings inwards.
449
450 * The no-duplicate thing is done via primOpIsCheap, by making
451 has_side_effects things (very very very) not-cheap!
452 -}
453
-- | The has_side_effects flag of Note [PrimOp can_fail and has_side_effects]:
-- True for primops with a *write* effect (I/O, mutation, throwing a
-- synchronous exception).  Clauses come from the generated include file.
454 primOpHasSideEffects :: PrimOp -> Bool
455 #include "primop-has-side-effects.hs-incl"
456
-- | The can_fail flag of Note [PrimOp can_fail and has_side_effects]:
-- True for primops that can fail with an unchecked exception (e.g. division
-- by zero, out-of-bounds indexing).  Clauses come from the generated include.
457 primOpCanFail :: PrimOp -> Bool
458 #include "primop-can-fail.hs-incl"
459
-- | True if the primop may be speculated (evaluated eagerly / floated
-- freely): it must have no side effects, not be out-of-line, and not be able
-- to fail.  Implies primOpOkForSideEffects.
460 primOpOkForSpeculation :: PrimOp -> Bool
461 -- See Note [PrimOp can_fail and has_side_effects]
462 -- See comments with CoreUtils.exprOkForSpeculation
463 -- primOpOkForSpeculation => primOpOkForSideEffects
464 primOpOkForSpeculation op
465 = primOpOkForSideEffects op
466 && not (primOpOutOfLine op || primOpCanFail op)
467 -- I think the "out of line" test is because out of line things can
468 -- be expensive (eg sine, cosine), and so we may not want to speculate them
469
-- | True iff the primop has no side effects (the negation of
-- primOpHasSideEffects); see Note [PrimOp can_fail and has_side_effects].
470 primOpOkForSideEffects :: PrimOp -> Bool
471 primOpOkForSideEffects op
472 = not (primOpHasSideEffects op)
473
474 {-
475 Note [primOpIsCheap]
476 ~~~~~~~~~~~~~~~~~~~~
477 @primOpIsCheap@, as used in \tr{SimplUtils.hs}. For now (HACK
478 WARNING), we just borrow some other predicates for a
479 what-should-be-good-enough test. "Cheap" means willing to call it more
480 than once, and/or push it inside a lambda. The latter could change the
481 behaviour of 'seq' for primops that can fail, so we don't treat them as cheap.
482 -}
483
-- | "Cheap" means the simplifier is willing to evaluate the primop more than
-- once and/or push it inside a lambda; see Note [primOpIsCheap] above.
-- Currently borrowed from primOpOkForSpeculation as a good-enough test.
484 primOpIsCheap :: PrimOp -> Bool
485 -- See Note [PrimOp can_fail and has_side_effects]
486 primOpIsCheap op = primOpOkForSpeculation op
487 -- In March 2001, we changed this to
488 -- primOpIsCheap op = False
489 -- thereby making *no* primops seem cheap. But this killed eta
490 -- expansion on case (x ==# y) of True -> \s -> ...
491 -- which is bad. In particular a loop like
492 -- doLoop n = loop 0
493 -- where
494 -- loop i | i == n = return ()
495 -- | otherwise = bar i >> loop (i+1)
496 -- allocated a closure every time round because it doesn't eta expand.
497 --
498 -- The problem that originally gave rise to the change was
499 -- let x = a +# b *# c in x +# x
500 -- where we don't want to inline x. But primOpIsCheap doesn't control
501 -- that (it's exprIsDupable that does) so the problem doesn't occur
502 -- even if primOpIsCheap sometimes says 'True'.
503
504 {-
505 ************************************************************************
506 * *
507 PrimOp code size
508 * *
509 ************************************************************************
510
511 primOpCodeSize
512 ~~~~~~~~~~~~~~
513 Gives an indication of the code size of a primop, for the purposes of
514 calculating unfolding sizes; see CoreUnfold.sizeExpr.
515 -}
516
-- | Indication of a primop's code size, used when calculating unfolding
-- sizes (see CoreUnfold.sizeExpr).  Clauses come from the generated include.
517 primOpCodeSize :: PrimOp -> Int
518 #include "primop-code-size.hs-incl"
519
-- | Default code-size estimate for a primop with no explicit size entry.
520 primOpCodeSizeDefault :: Int
521 primOpCodeSizeDefault = 1
522 -- CoreUnfold.primOpSize already takes into account primOpOutOfLine
523 -- and adds some further costs for the args in that case.
524
-- | Code-size estimate used for primops implemented as foreign calls;
-- larger than the default to reflect the call overhead.
525 primOpCodeSizeForeignCall :: Int
526 primOpCodeSizeForeignCall = 4
527
528 {-
529 ************************************************************************
530 * *
531 PrimOp types
532 * *
533 ************************************************************************
534 -}
535
-- | Build the full Type of a primop from its PrimOpInfo.  The three simple
-- shapes use the fun_ty helpers at the bottom of this module; GenPrimOp
-- quantifies its type variables (as specified binders) over the arrow type.
536 primOpType :: PrimOp -> Type -- you may want to use primOpSig instead
537 primOpType op
538 = case primOpInfo op of
539 Dyadic _occ ty -> dyadic_fun_ty ty
540 Monadic _occ ty -> monadic_fun_ty ty
541 Compare _occ ty -> compare_fun_ty ty
542
543 GenPrimOp _occ tyvars arg_tys res_ty ->
544 mkSpecForAllTys tyvars (mkFunTys arg_tys res_ty)
545
-- | The user-visible OccName of a primop, extracted from whichever
-- PrimOpInfo shape it has.
546 primOpOcc :: PrimOp -> OccName
547 primOpOcc op = case primOpInfo op of
548 Dyadic occ _ -> occ
549 Monadic occ _ -> occ
550 Compare occ _ -> occ
551 GenPrimOp occ _ _ _ -> occ
552
553 -- primOpSig is like primOpType but gives the result split apart:
554 -- (type variables, argument types, result type)
555 -- It also gives arity, strictness info
556
-- | Split-apart signature of a primop: type variables, argument types,
-- result type, arity (the number of value arguments), and strictness.
-- Note that a Compare primop always returns Int#.
557 primOpSig :: PrimOp -> ([TyVar], [Type], Type, Arity, StrictSig)
558 primOpSig op
559 = (tyvars, arg_tys, res_ty, arity, primOpStrictness op arity)
560 where
561 arity = length arg_tys
562 (tyvars, arg_tys, res_ty)
563 = case (primOpInfo op) of
564 Monadic _occ ty -> ([], [ty], ty )
565 Dyadic _occ ty -> ([], [ty,ty], ty )
566 Compare _occ ty -> ([], [ty,ty], intPrimTy)
567 GenPrimOp _occ tyvars arg_tys res_ty -> (tyvars, arg_tys, res_ty )
568
-- | What kind of value a primop returns, as needed by the code generator:
-- either a manifest primitive rep, or an algebraic TyCon (which may be an
-- unboxed tuple).
569 data PrimOpResultInfo
570 = ReturnsPrim PrimRep
571 | ReturnsAlg TyCon
572
573 -- Some PrimOps need not return a manifest primitive or algebraic value
574 -- (i.e. they might return a polymorphic value). These PrimOps *must*
575 -- be out of line, or the code generator won't work.
576
-- | Classify a primop's result for the code generator.  Dyadic/Monadic
-- results use the rep of their operand type; Compare always returns Int#;
-- a GenPrimOp result is inspected via its head TyCon.
577 getPrimOpResultInfo :: PrimOp -> PrimOpResultInfo
578 getPrimOpResultInfo op
579 = case (primOpInfo op) of
580 Dyadic _ ty -> ReturnsPrim (typePrimRep ty)
581 Monadic _ ty -> ReturnsPrim (typePrimRep ty)
582 Compare _ _ -> ReturnsPrim (tyConPrimRep intPrimTyCon)
583 GenPrimOp _ _ _ ty | isPrimTyCon tc -> ReturnsPrim (tyConPrimRep tc)
584 | otherwise -> ReturnsAlg tc
585 where
586 tc = tyConAppTyCon ty
587 -- All primops return a tycon-app result
588 -- The tycon can be an unboxed tuple, though, which
589 -- gives rise to a ReturnAlg
590
591 {-
592 We do not currently make use of whether primops are commutable.
593
594 We used to try to move constants to the right hand side for strength
595 reduction.
596 -}
597
598 {-
599 commutableOp :: PrimOp -> Bool
600 #include "primop-commutable.hs-incl"
601 -}
602
603 -- Utils:
604
-- | Type-construction helpers for the three simple PrimOpInfo shapes:
-- dyadic (T -> T -> T), monadic (T -> T), compare (T -> T -> Int#).
605 dyadic_fun_ty, monadic_fun_ty, compare_fun_ty :: Type -> Type
606 dyadic_fun_ty ty = mkFunTys [ty, ty] ty
607 monadic_fun_ty ty = mkFunTy ty ty
608 compare_fun_ty ty = mkFunTys [ty, ty] intPrimTy
609
610 -- Output stuff:
611
-- | Pretty-print a primop by its OccName; used by the Outputable instance.
612 pprPrimOp :: PrimOp -> SDoc
613 pprPrimOp other_op = pprOccName (primOpOcc other_op)
614
615 {-
616 ************************************************************************
617 * *
618 \subsubsection[PrimCall]{User-imported primitive calls}
619 * *
620 ************************************************************************
621 -}
622
623 data PrimCall = PrimCall CLabelString UnitId
624
-- | Render a PrimCall as "__primcall <package> <label>".
625 instance Outputable PrimCall where
626 ppr (PrimCall lbl pkgId)
627 = text "__primcall" <+> ppr pkgId <+> ppr lbl