{-
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998

\section[PrimOp]{Primitive operations (machine-level)}
-}

{-# LANGUAGE CPP #-}

module PrimOp (
        PrimOp(..), PrimOpVecCat(..), allThePrimOps,
        primOpType, primOpSig,
        primOpTag, maxPrimOpTag, primOpOcc,

        tagToEnumKey,

        primOpOutOfLine, primOpCodeSize,
        primOpOkForSpeculation, primOpOkForSideEffects,
        primOpIsCheap, primOpFixity,

        getPrimOpResultInfo,  PrimOpResultInfo(..),

        PrimCall(..)
    ) where

#include "HsVersions.h"

import TysPrim
import TysWiredIn

import CmmType
import Demand
import Var              ( TyVar )
import OccName          ( OccName, pprOccName, mkVarOccFS )
import TyCon            ( TyCon, isPrimTyCon, tyConPrimRep, PrimRep(..) )
import Type             ( Type, mkForAllTys, mkFunTy, mkFunTys, tyConAppTyCon,
                          typePrimRep )
import BasicTypes       ( Arity, Fixity(..), FixityDirection(..), Boxity(..) )
import ForeignCall      ( CLabelString )
import Unique           ( Unique, mkPrimOpIdUnique )
import Outputable
import FastString
import Module           ( PackageKey )

{-
************************************************************************
*                                                                      *
\subsection[PrimOp-datatype]{Datatype for @PrimOp@ (an enumeration)}
*                                                                      *
************************************************************************

These are in \tr{state-interface.verb} order.
-}

-- supplies:
-- data PrimOp = ...
#include "primop-data-decl.hs-incl"

-- supplies
--      primOpTag :: PrimOp -> Int
#include "primop-tag.hs-incl"
primOpTag _ = error "primOpTag: unknown primop"
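
-- For orientation: primop-tag.hs-incl is generated (by genprimopcode) from
-- primops.txt.pp and consists of one equation per PrimOp constructor,
-- roughly of this shape (the constructors and tag values below are
-- illustrative, not the real generated numbering):
--
--      primOpTag CharGtOp = 1
--      primOpTag CharGeOp = 2
--      ...
--
-- maxPrimOpTag, exported above, also comes from the generated code.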


instance Eq PrimOp where
    op1 == op2 = primOpTag op1 == primOpTag op2

instance Ord PrimOp where
    op1 <  op2 = primOpTag op1 < primOpTag op2
    op1 <= op2 = primOpTag op1 <= primOpTag op2
    op1 >= op2 = primOpTag op1 >= primOpTag op2
    op1 >  op2 = primOpTag op1 > primOpTag op2
    op1 `compare` op2 | op1 < op2  = LT
                      | op1 == op2 = EQ
                      | otherwise  = GT

instance Outputable PrimOp where
    ppr op = pprPrimOp op

data PrimOpVecCat = IntVec
                  | WordVec
                  | FloatVec

-- An @Enum@-derived list would be better; meanwhile... (ToDo)

allThePrimOps :: [PrimOp]
allThePrimOps =
#include "primop-list.hs-incl"

tagToEnumKey :: Unique
tagToEnumKey = mkPrimOpIdUnique (primOpTag TagToEnumOp)

{-
************************************************************************
*                                                                      *
\subsection[PrimOp-info]{The essential info about each @PrimOp@}
*                                                                      *
************************************************************************

The @String@ in the @PrimOpInfos@ is the ``base name'' by which the user may
refer to the primitive operation.  The conventional \tr{#} suffix for
unboxed ops is added on later.

The reason for the funny characters in the names is so we do not
interfere with the programmer's Haskell name spaces.

We use @PrimKinds@ for the ``type'' information, because they're
(slightly) more convenient to use than @TyCons@.
-}

data PrimOpInfo
  = Dyadic      OccName         -- string :: T -> T -> T
                Type
  | Monadic     OccName         -- string :: T -> T
                Type
  | Compare     OccName         -- string :: T -> T -> Int#
                Type
  | GenPrimOp   OccName         -- string :: \/a1..an . T1 -> .. -> Tk -> T
                [TyVar]
                [Type]
                Type

mkDyadic, mkMonadic, mkCompare :: FastString -> Type -> PrimOpInfo
mkDyadic str  ty = Dyadic  (mkVarOccFS str) ty
mkMonadic str ty = Monadic (mkVarOccFS str) ty
mkCompare str ty = Compare (mkVarOccFS str) ty

mkGenPrimOp :: FastString -> [TyVar] -> [Type] -> Type -> PrimOpInfo
mkGenPrimOp str tvs tys ty = GenPrimOp (mkVarOccFS str) tvs tys ty

{-
************************************************************************
*                                                                      *
\subsubsection{Strictness}
*                                                                      *
************************************************************************

Not all primops are strict!
-}

primOpStrictness :: PrimOp -> Arity -> StrictSig
        -- See Demand.StrictnessInfo for a discussion of what the results mean.
        -- The arity should be the arity of the primop; that's why
        -- this function isn't exported.
#include "primop-strictness.hs-incl"

{-
************************************************************************
*                                                                      *
\subsubsection{Fixity}
*                                                                      *
************************************************************************
-}

primOpFixity :: PrimOp -> Maybe Fixity
#include "primop-fixity.hs-incl"
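
-- The generated primop-fixity.hs-incl pairs infix primops with their
-- fixities and yields Nothing for the rest.  Purely as an illustration of
-- the shape (these particular equations are not the generated ones):
--
--      primOpFixity IntAddOp = Just (Fixity 6 InfixL)
--      primOpFixity _        = Nothing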

{-
************************************************************************
*                                                                      *
\subsubsection[PrimOp-comparison]{PrimOpInfo basic comparison ops}
*                                                                      *
************************************************************************

@primOpInfo@ gives all essential information (from which everything
else, notably a type, can be constructed) for each @PrimOp@.
-}

primOpInfo :: PrimOp -> PrimOpInfo
#include "primop-primop-info.hs-incl"
primOpInfo _ = error "primOpInfo: unknown primop"
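
-- The generated primop-primop-info.hs-incl contains one equation per primop,
-- built with the mk* helpers above.  A sketch of the shape only (the real
-- equations come from primops.txt.pp):
--
--      primOpInfo IntAddOp = mkDyadic  (fsLit "+#")         intPrimTy
--      primOpInfo IntNegOp = mkMonadic (fsLit "negateInt#") intPrimTy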

{-
Here are a load of comments from the old primOp info:

A @Word#@ is an unsigned @Int#@.

@decodeFloat#@ is given w/ Integer-stuff (it's similar).

@decodeDouble#@ is given w/ Integer-stuff (it's similar).

Decoding of floating-point numbers is sorta Integer-related.  Encoding
is done with plain ccalls now (see PrelNumExtra.hs).

A @Weak@ Pointer is created by the @mkWeak#@ primitive:

        mkWeak# :: k -> v -> f -> State# RealWorld
                        -> (# State# RealWorld, Weak# v #)

In practice, you'll use the higher-level

        data Weak v = Weak# v
        mkWeak :: k -> v -> IO () -> IO (Weak v)

The following operation dereferences a weak pointer.  The weak pointer
may have been finalized, so the operation returns a result code which
must be inspected before looking at the dereferenced value.

        deRefWeak# :: Weak# v -> State# RealWorld ->
                        (# State# RealWorld, v, Int# #)

Only look at v if the Int# returned is /= 0 !!

The higher-level op is

        deRefWeak :: Weak v -> IO (Maybe v)

Weak pointers can be finalized early by using the finalize# operation:

        finalizeWeak# :: Weak# v -> State# RealWorld ->
                           (# State# RealWorld, Int#, IO () #)

The Int# returned is either

        0 if the weak pointer has already been finalized, or it has no
          finalizer (the third component is then invalid).

        1 if the weak pointer is still alive, with the finalizer returned
          as the third component.

A {\em stable name/pointer} is an index into a table of stable name
entries.  Since the garbage collector is told about stable pointers,
it is safe to pass a stable pointer to external systems such as C
routines.

\begin{verbatim}
makeStablePtr#  :: a -> State# RealWorld -> (# State# RealWorld, StablePtr# a #)
freeStablePtr   :: StablePtr# a -> State# RealWorld -> State# RealWorld
deRefStablePtr# :: StablePtr# a -> State# RealWorld -> (# State# RealWorld, a #)
eqStablePtr#    :: StablePtr# a -> StablePtr# a -> Int#
\end{verbatim}

It may seem a bit surprising that @makeStablePtr#@ is an @IO@
operation since it doesn't (directly) involve IO operations.  The
reason is that if some optimisation pass decided to duplicate calls to
@makeStablePtr#@ and we only pass one of the stable pointers over, a
massive space leak can result.  Putting it into the IO monad
prevents this.  (Another reason for putting them in a monad is to
ensure correct sequencing wrt the side-effecting @freeStablePtr@
operation.)

An important property of stable pointers is that if you call
makeStablePtr# twice on the same object you get the same stable
pointer back.

Note that we can implement @freeStablePtr#@ using @_ccall_@ (and,
besides, it's not likely to be used from Haskell) so it's not a
primop.

Question: Why @RealWorld@ - won't any instance of @_ST@ do the job? [ADR]

Stable Names
~~~~~~~~~~~~

A stable name is like a stable pointer, but with three important differences:

        (a) You can't deRef one to get back to the original object.
        (b) You can convert one to an Int.
        (c) You don't need to 'freeStableName'

The existence of a stable name doesn't guarantee that the object it
points to is kept alive (unlike a stable pointer), hence (a).

Invariants (illustrated by the sketch just below this list):

        (a) makeStableName always returns the same value for a given
            object (same as stable pointers).

        (b) if two stable names are equal, it implies that the objects
            from which they were created were the same.

        (c) stableNameToInt always returns the same Int for a given
            stable name.

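The user-level API lives in System.Mem.StableName (makeStableName,
hashStableName).  A minimal sketch illustrating (a) and (c) at that level,
with hashStableName playing the role of stableNameToInt:

        import System.Mem.StableName (makeStableName, hashStableName)

        main :: IO ()
        main = do
          let xs = [1,2,3] :: [Int]
          sn1 <- makeStableName xs
          sn2 <- makeStableName xs
          print (sn1 == sn2)                                -- (a): True
          print (hashStableName sn1 == hashStableName sn2)  -- (c): True
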

These primops are pretty weird.

        dataToTag# :: a -> Int    (arg must be an evaluated data type)
        tagToEnum# :: Int -> a    (result type must be an enumerated type)

The constraints aren't currently checked by the front end, but the
code generator will fall over if they aren't satisfied.
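
For instance, a user-level sketch (the Colour type is just an example;
MagicHash is needed to write the # names, which GHC.Exts re-exports):

        {-# LANGUAGE MagicHash #-}
        import GHC.Exts (dataToTag#, tagToEnum#, (+#), remInt#)

        data Colour = Red | Green | Blue

        -- Rotate to the next constructor by going via the tag.
        nextColour :: Colour -> Colour
        nextColour c = tagToEnum# (remInt# (dataToTag# c +# 1#) 3#)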

************************************************************************
*                                                                      *
             Which PrimOps are out-of-line
*                                                                      *
************************************************************************

Some PrimOps need to be called out-of-line because they either need to
perform a heap check or they block.
-}

primOpOutOfLine :: PrimOp -> Bool
#include "primop-out-of-line.hs-incl"

{-
************************************************************************
*                                                                      *
             Failure and side effects
*                                                                      *
************************************************************************

Note [PrimOp can_fail and has_side_effects]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Both can_fail and has_side_effects mean that the primop has
some effect that is not captured entirely by its result value.

----------  has_side_effects ---------------------
A primop "has_side_effects" if it has some *write* effect, visible
elsewhere
    - writing to the world (I/O)
    - writing to a mutable data structure (writeIORef)
    - throwing a synchronous Haskell exception

Often such primops have a type like
    State -> input -> (State, output)
so the state token guarantees ordering.  In general we rely *only* on
data dependencies of the state token to enforce write-effect ordering.

 * NB1: if you inline unsafePerformIO, you may end up with
   side-effecting ops whose 'state' output is discarded.
   And programmers may do that by hand; see Trac #9390.
   That is why we (conservatively) do not discard write-effecting
   primops even if both their state and result are discarded.

 * NB2: We consider primops, such as raiseIO#, that can raise a
   (Haskell) synchronous exception to "have_side_effects" but not
   "can_fail".  We must be careful about not discarding such things;
   see the paper "A semantics for imprecise exceptions".

 * NB3: *Read* effects (like reading an IORef) don't count here,
   because it doesn't matter if we don't do them, or do them more than
   once.  *Sequencing* is maintained by the data dependency of the state
   token.

----------  can_fail ----------------------------
A primop "can_fail" if it can fail with an *unchecked* exception on
some elements of its input domain.  Main examples:
   division (fails on zero denominator)
   array indexing (fails if the index is out of bounds)

An "unchecked exception" is one that is an outright error (not turned
into a Haskell exception), such as a seg-fault or divide-by-zero error.
Such can_fail primops are ALWAYS surrounded with a test that checks
for the bad cases, but we need to be very careful about code motion
that might move it out of the scope of the test.
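
For example, writeMutVar# has_side_effects: it writes to a mutable cell,
and sequencing is carried by the state token in its type.  By contrast,
quotInt# can_fail (it traps on a zero divisor) but has no write effect:

        writeMutVar# :: MutVar# s a -> a -> State# s -> State# s
        quotInt#     :: Int# -> Int# -> Int#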

Note [Transformations affected by can_fail and has_side_effects]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The can_fail and has_side_effects properties have the following effect
on program transformations.  The summary table is followed by details.

                        can_fail     has_side_effects
        Discard              NO            NO
        Float in            YES           YES
        Float out            NO            NO
        Duplicate           YES            NO

* Discarding.   case (a `op` b) of _ -> rhs  ===>  rhs
  You should not discard a has_side_effects primop; e.g.
     case (writeIntArray# a i v s) of (# _, _ #) -> True
  Arguably you should be able to discard this, since the
  returned state token is not used, but that relies on NEVER
  inlining unsafePerformIO, and programmers sometimes write
  this kind of stuff by hand (Trac #9390).  So we (conservatively)
  never discard a has_side_effects primop.

  However, it's fine to discard a can_fail primop.  For example
     case (indexIntArray# a i) of _ -> True
  We can discard indexIntArray#; it has can_fail, but not
  has_side_effects; see Trac #5658 which was all about this.
  Notice that indexIntArray# is (in a more general handling of
  effects) a read effect, but we don't care about that here, and
  treat read effects as *not* has_side_effects.

  Similarly (a `/#` b) can be discarded.  It can seg-fault or
  cause a hardware exception, but not a synchronous Haskell
  exception.

  Synchronous Haskell exceptions, e.g. from raiseIO#, are treated
  as has_side_effects and hence are not discarded.

* Float in.  You can float a can_fail or has_side_effects primop
  *inwards*, but not inside a lambda (see Duplication below).

* Float out.  You must not float a can_fail primop *outwards* lest
  you escape the dynamic scope of the test.  Example:
      case d ># 0# of
        True  -> case x /# d of r -> r +# 1
        False -> 0
  Here we must not float the case outwards to give
      case x /# d of r ->
      case d ># 0# of
        True  -> r +# 1
        False -> 0

  Nor can you float out a has_side_effects primop.  For example:
       if blah then case writeMutVar# v True s0 of (# s1 #) -> s1
               else s0
  Notice that s0 is mentioned in both branches of the 'if', but
  only one of these two will actually be consumed.  But if we
  float out to
      case writeMutVar# v True s0 of (# s1 #) ->
      if blah then s1 else s0
  the writeMutVar will be performed in both branches, which is
  utterly wrong.

* Duplication.  You cannot duplicate a has_side_effects primop.  You
  might wonder how this can occur given the state token threading, but
  just look at Control.Monad.ST.Lazy.Imp.strictToLazy!  We get
  something like this
        p = case readMutVar# s v of
              (# s', r #) -> (S# s', r)
        s' = case p of (s', r) -> s'
        r  = case p of (s', r) -> r

  (All these bindings are boxed.)  If we inline p at its two call
  sites, we get a catastrophe: the read is performed once when
  s' is demanded, and once when 'r' is demanded, which may be much
  later.  Utterly wrong.  Trac #3207 is a real example of this happening.

  However, it's fine to duplicate a can_fail primop.  That is really
  the only difference between can_fail and has_side_effects.
Note [Implementation: how can_fail/has_side_effects affect transformations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
How do we ensure that floating/duplication/discarding are done right
in the simplifier?

Two main predicates on primops test these flags:
  primOpOkForSideEffects <=> not has_side_effects
  primOpOkForSpeculation <=> not (has_side_effects || can_fail)

  * The "no-float-out" thing is achieved by ensuring that we never
    let-bind a can_fail or has_side_effects primop.  The RHS of a
    let-binding (which can float in and out freely) satisfies
    exprOkForSpeculation; this is the let/app invariant.  And
    exprOkForSpeculation is false of can_fail and has_side_effects.

  * So can_fail and has_side_effects primops will appear only as the
    scrutinees of cases, and that's why the FloatIn pass is capable
    of floating case bindings inwards.

  * The no-duplicate thing is done via primOpIsCheap, by making
    has_side_effects things (very very very) not-cheap!
-}

primOpHasSideEffects :: PrimOp -> Bool
#include "primop-has-side-effects.hs-incl"

primOpCanFail :: PrimOp -> Bool
#include "primop-can-fail.hs-incl"

primOpOkForSpeculation :: PrimOp -> Bool
  -- See Note [PrimOp can_fail and has_side_effects]
  -- See comments with CoreUtils.exprOkForSpeculation
  -- primOpOkForSpeculation => primOpOkForSideEffects
primOpOkForSpeculation op
  =  primOpOkForSideEffects op
  && not (primOpOutOfLine op || primOpCanFail op)
    -- I think the "out of line" test is because out of line things can
    -- be expensive (eg sine, cosine), and so we may not want to speculate them

primOpOkForSideEffects :: PrimOp -> Bool
primOpOkForSideEffects op
  = not (primOpHasSideEffects op)

{-
Note [primOpIsCheap]
~~~~~~~~~~~~~~~~~~~~
@primOpIsCheap@, as used in \tr{SimplUtils.hs}.  For now (HACK
WARNING), we just borrow some other predicates for a
what-should-be-good-enough test.  "Cheap" means willing to call it more
than once, and/or push it inside a lambda.  The latter could change the
behaviour of 'seq' for primops that can fail, so we don't treat them as cheap.
-}

primOpIsCheap :: PrimOp -> Bool
-- See Note [PrimOp can_fail and has_side_effects]
primOpIsCheap op = primOpOkForSpeculation op
-- In March 2001, we changed this to
--      primOpIsCheap op = False
-- thereby making *no* primops seem cheap.  But this killed eta
-- expansion on case (x ==# y) of True -> \s -> ...
-- which is bad.  In particular a loop like
--      doLoop n = loop 0
--        where
--          loop i | i == n    = return ()
--                 | otherwise = bar i >> loop (i+1)
-- allocated a closure every time round because it doesn't eta expand.
--
-- The problem that originally gave rise to the change was
--      let x = a +# b *# c in x +# x
-- where we don't want to inline x.  But primOpIsCheap doesn't control
-- that (it's exprIsDupable that does) so the problem doesn't occur
-- even if primOpIsCheap sometimes says 'True'.

{-
************************************************************************
*                                                                      *
               PrimOp code size
*                                                                      *
************************************************************************

primOpCodeSize
~~~~~~~~~~~~~~
Gives an indication of the code size of a primop, for the purposes of
calculating unfolding sizes; see CoreUnfold.sizeExpr.
-}

primOpCodeSize :: PrimOp -> Int
#include "primop-code-size.hs-incl"

primOpCodeSizeDefault :: Int
primOpCodeSizeDefault = 1
  -- CoreUnfold.primOpSize already takes into account primOpOutOfLine
  -- and adds some further costs for the args in that case.

primOpCodeSizeForeignCall :: Int
primOpCodeSizeForeignCall = 4

{-
************************************************************************
*                                                                      *
               PrimOp types
*                                                                      *
************************************************************************
-}

primOpType :: PrimOp -> Type  -- you may want to use primOpSig instead
primOpType op
  = case primOpInfo op of
    Dyadic  _occ ty -> dyadic_fun_ty ty
    Monadic _occ ty -> monadic_fun_ty ty
    Compare _occ ty -> compare_fun_ty ty

    GenPrimOp _occ tyvars arg_tys res_ty ->
        mkForAllTys tyvars (mkFunTys arg_tys res_ty)

primOpOcc :: PrimOp -> OccName
primOpOcc op = case primOpInfo op of
               Dyadic    occ _     -> occ
               Monadic   occ _     -> occ
               Compare   occ _     -> occ
               GenPrimOp occ _ _ _ -> occ

-- primOpSig is like primOpType but gives the result split apart:
-- (type variables, argument types, result type)
-- It also gives the arity and strictness info.

primOpSig :: PrimOp -> ([TyVar], [Type], Type, Arity, StrictSig)
primOpSig op
  = (tyvars, arg_tys, res_ty, arity, primOpStrictness op arity)
  where
    arity = length arg_tys
    (tyvars, arg_tys, res_ty)
      = case (primOpInfo op) of
        Monadic   _occ ty                    -> ([],     [ty],    ty       )
        Dyadic    _occ ty                    -> ([],     [ty,ty], ty       )
        Compare   _occ ty                    -> ([],     [ty,ty], intPrimTy)
        GenPrimOp _occ tyvars arg_tys res_ty -> (tyvars, arg_tys, res_ty   )
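
-- Purely illustrative (the StrictSig component is elided):
--
--      primOpSig IntAddOp
--        ==> ([], [intPrimTy, intPrimTy], intPrimTy, 2, <strictness>)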

data PrimOpResultInfo
  = ReturnsPrim PrimRep
  | ReturnsAlg  TyCon

-- Some PrimOps need not return a manifest primitive or algebraic value
-- (i.e. they might return a polymorphic value).  These PrimOps *must*
-- be out of line, or the code generator won't work.

getPrimOpResultInfo :: PrimOp -> PrimOpResultInfo
getPrimOpResultInfo op
  = case (primOpInfo op) of
      Dyadic  _ ty                        -> ReturnsPrim (typePrimRep ty)
      Monadic _ ty                        -> ReturnsPrim (typePrimRep ty)
      Compare _ _                         -> ReturnsPrim (tyConPrimRep intPrimTyCon)
      GenPrimOp _ _ _ ty | isPrimTyCon tc -> ReturnsPrim (tyConPrimRep tc)
                         | otherwise      -> ReturnsAlg tc
                         where
                           tc = tyConAppTyCon ty
        -- All primops return a tycon-app result
        -- The tycon can be an unboxed tuple, though, which
        -- gives rise to a ReturnsAlg
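
-- Illustrative only:
--      getPrimOpResultInfo IntAddOp  ==>  ReturnsPrim IntRep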

{-
We do not currently make use of whether primops are commutable.

We used to try to move constants to the right hand side for strength
reduction.
-}

{-
commutableOp :: PrimOp -> Bool
#include "primop-commutable.hs-incl"
-}

-- Utils:

dyadic_fun_ty, monadic_fun_ty, compare_fun_ty :: Type -> Type
dyadic_fun_ty  ty = mkFunTys [ty, ty] ty
monadic_fun_ty ty = mkFunTy  ty ty
compare_fun_ty ty = mkFunTys [ty, ty] intPrimTy

-- Output stuff:

pprPrimOp :: PrimOp -> SDoc
pprPrimOp other_op = pprOccName (primOpOcc other_op)

{-
************************************************************************
*                                                                      *
\subsubsection[PrimCall]{User-imported primitive calls}
*                                                                      *
************************************************************************
-}

data PrimCall = PrimCall CLabelString PackageKey
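
-- A PrimCall arises from a 'foreign import prim' declaration, which binds a
-- Haskell name to a Cmm procedure in some package.  A purely hypothetical
-- sketch of the source syntax (the symbol and type here are made up):
--
--      foreign import prim "my_cmm_op"
--        myOp# :: Int# -> Int#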

instance Outputable PrimCall where
  ppr (PrimCall lbl pkgId)
        = text "__primcall" <+> ppr pkgId <+> ppr lbl