Remove fix-submodules.py
[ghc.git] / compiler / prelude / PrimOp.hs
1 {-
2 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
3
4 \section[PrimOp]{Primitive operations (machine-level)}
5 -}
6
7 {-# LANGUAGE CPP #-}
8
9 -- The default is a bit too low for the quite large primOpInfo definition
10 {-# OPTIONS_GHC -fmax-pmcheck-iterations=10000000 #-}
11
12 module PrimOp (
13 PrimOp(..), PrimOpVecCat(..), allThePrimOps,
14 primOpType, primOpSig,
15 primOpTag, maxPrimOpTag, primOpOcc,
16 primOpWrapperId,
17
18 tagToEnumKey,
19
20 primOpOutOfLine, primOpCodeSize,
21 primOpOkForSpeculation, primOpOkForSideEffects,
22 primOpIsCheap, primOpFixity,
23
24 getPrimOpResultInfo, isComparisonPrimOp, PrimOpResultInfo(..),
25
26 PrimCall(..)
27 ) where
28
29 #include "HsVersions.h"
30
31 import GhcPrelude
32
33 import TysPrim
34 import TysWiredIn
35
36 import CmmType
37 import Demand
38 import Id ( Id, mkVanillaGlobalWithInfo )
39 import IdInfo ( vanillaIdInfo, setCafInfo, CafInfo(NoCafRefs) )
40 import Name
41 import PrelNames ( gHC_PRIMOPWRAPPERS )
42 import TyCon ( TyCon, isPrimTyCon, PrimRep(..) )
43 import Type
44 import RepType ( typePrimRep1, tyConPrimRep1 )
45 import BasicTypes ( Arity, Fixity(..), FixityDirection(..), Boxity(..),
46 SourceText(..) )
47 import SrcLoc ( wiredInSrcSpan )
48 import ForeignCall ( CLabelString )
49 import Unique ( Unique, mkPrimOpIdUnique, mkPrimOpWrapperUnique )
50 import Outputable
51 import FastString
52 import Module ( UnitId )
53
54 {-
55 ************************************************************************
56 * *
57 \subsection[PrimOp-datatype]{Datatype for @PrimOp@ (an enumeration)}
58 * *
59 ************************************************************************
60
61 These are in \tr{state-interface.verb} order.
62 -}
63
-- Generated include supplies the PrimOp enumeration itself:
--      data PrimOp = ...
#include "primop-data-decl.hs-incl"

-- Generated include supplies one equation per primop constructor for:
--      primOpTag :: PrimOp -> Int
#include "primop-tag.hs-incl"
-- Catch-all equation: unreachable as long as the generated include above
-- covers every constructor; it exists to silence the pattern checker.
primOpTag _ = error "primOpTag: unknown primop"
72
73
-- Equality is delegated to the primop's tag.
instance Eq PrimOp where
    a == b = primOpTag a == primOpTag b
76
-- Ordering is likewise delegated to the primop's tag.  'compare' is
-- defined directly on the tags: the previous definition went via (<)
-- and (==), recomputing 'primOpTag' on each operand up to twice.
instance Ord PrimOp where
    a <  b = primOpTag a <  primOpTag b
    a <= b = primOpTag a <= primOpTag b
    a >= b = primOpTag a >= primOpTag b
    a >  b = primOpTag a >  primOpTag b
    compare a b = compare (primOpTag a) (primOpTag b)
85
instance Outputable PrimOp where
    ppr = pprPrimOp
88
-- | The element flavour of a vector primop's operands.
data PrimOpVecCat = IntVec
                  | WordVec
                  | FloatVec
92
93 -- An @Enum@-derived list would be better; meanwhile... (ToDo)
94
-- | Every primop, in declaration order; the list itself comes from a
-- generated include file.
allThePrimOps :: [PrimOp]
allThePrimOps =
#include "primop-list.hs-incl"
98
-- | The 'Unique' of the Id for the tagToEnum# primop, exported because
-- that primop gets special treatment elsewhere in the compiler.
tagToEnumKey :: Unique
tagToEnumKey = mkPrimOpIdUnique (primOpTag TagToEnumOp)
101
102 {-
103 ************************************************************************
104 * *
105 \subsection[PrimOp-info]{The essential info about each @PrimOp@}
106 * *
107 ************************************************************************
108
109 The @String@ in the @PrimOpInfos@ is the ``base name'' by which the user may
110 refer to the primitive operation. The conventional \tr{#}-for-
111 unboxed ops is added on later.
112
113 The reason for the funny characters in the names is so we do not
114 interfere with the programmer's Haskell name spaces.
115
116 We use @PrimKinds@ for the ``type'' information, because they're
117 (slightly) more convenient to use than @TyCons@.
118 -}
119
-- | The essential information about a primop, from which its full type
-- ('primOpType') and signature ('primOpSig') are constructed.
data PrimOpInfo
  = Dyadic      OccName         -- string :: T -> T -> T
                Type
  | Monadic     OccName         -- string :: T -> T
                Type
  | Compare     OccName         -- string :: T -> T -> Int#
                Type
  | GenPrimOp   OccName         -- string :: \/a1..an . T1 -> .. -> Tk -> T
                [TyVar]
                [Type]
                Type
131
-- Smart constructors that wrap the OccName creation for each shape of
-- 'PrimOpInfo'.
mkDyadic, mkMonadic, mkCompare :: FastString -> Type -> PrimOpInfo
mkDyadic  fs = Dyadic  (mkVarOccFS fs)
mkMonadic fs = Monadic (mkVarOccFS fs)
mkCompare fs = Compare (mkVarOccFS fs)

mkGenPrimOp :: FastString -> [TyVar] -> [Type] -> Type -> PrimOpInfo
mkGenPrimOp fs = GenPrimOp (mkVarOccFS fs)
139
140 {-
141 ************************************************************************
142 * *
143 \subsubsection{Strictness}
144 * *
145 ************************************************************************
146
147 Not all primops are strict!
148 -}
149
primOpStrictness :: PrimOp -> Arity -> StrictSig
-- See Demand.StrictnessInfo for discussion of what the results mean.
-- The arity argument should be the arity of the primop; that's why
-- this function isn't exported.  Equations come from a generated include.
#include "primop-strictness.hs-incl"
155
156 {-
157 ************************************************************************
158 * *
159 \subsubsection{Fixity}
160 * *
161 ************************************************************************
162 -}
163
-- | The fixity declaration, if any, for an operator-named primop;
-- equations come from a generated include file.
primOpFixity :: PrimOp -> Maybe Fixity
#include "primop-fixity.hs-incl"
166
167 {-
168 ************************************************************************
169 * *
170 \subsubsection[PrimOp-comparison]{PrimOpInfo basic comparison ops}
171 * *
172 ************************************************************************
173
174 @primOpInfo@ gives all essential information (from which everything
175 else, notably a type, can be constructed) for each @PrimOp@.
176 -}
177
-- | All essential information about a primop (from which everything
-- else, notably its type, can be constructed); equations come from a
-- generated include file.
primOpInfo :: PrimOp -> PrimOpInfo
#include "primop-primop-info.hs-incl"
-- Catch-all: unreachable unless the generated include is out of date.
primOpInfo _ = error "primOpInfo: unknown primop"
181
182 {-
183 Here are a load of comments from the old primOp info:
184
185 A @Word#@ is an unsigned @Int#@.
186
187 @decodeFloat#@ is given w/ Integer-stuff (it's similar).
188
189 @decodeDouble#@ is given w/ Integer-stuff (it's similar).
190
191 Decoding of floating-point numbers is sorta Integer-related. Encoding
192 is done with plain ccalls now (see PrelNumExtra.hs).
193
194 A @Weak@ Pointer is created by the @mkWeak#@ primitive:
195
196 mkWeak# :: k -> v -> f -> State# RealWorld
197 -> (# State# RealWorld, Weak# v #)
198
199 In practice, you'll use the higher-level
200
201 data Weak v = Weak# v
202 mkWeak :: k -> v -> IO () -> IO (Weak v)
203
204 The following operation dereferences a weak pointer. The weak pointer
205 may have been finalized, so the operation returns a result code which
206 must be inspected before looking at the dereferenced value.
207
208 deRefWeak# :: Weak# v -> State# RealWorld ->
209 (# State# RealWorld, v, Int# #)
210
211 Only look at v if the Int# returned is /= 0 !!
212
213 The higher-level op is
214
215 deRefWeak :: Weak v -> IO (Maybe v)
216
217 Weak pointers can be finalized early by using the finalize# operation:
218
219 finalizeWeak# :: Weak# v -> State# RealWorld ->
220 (# State# RealWorld, Int#, IO () #)
221
222 The Int# returned is either
223
224 0 if the weak pointer has already been finalized, or it has no
225 finalizer (the third component is then invalid).
226
227 1 if the weak pointer is still alive, with the finalizer returned
228 as the third component.
229
230 A {\em stable name/pointer} is an index into a table of stable name
231 entries. Since the garbage collector is told about stable pointers,
232 it is safe to pass a stable pointer to external systems such as C
233 routines.
234
235 \begin{verbatim}
236 makeStablePtr# :: a -> State# RealWorld -> (# State# RealWorld, StablePtr# a #)
237 freeStablePtr :: StablePtr# a -> State# RealWorld -> State# RealWorld
238 deRefStablePtr# :: StablePtr# a -> State# RealWorld -> (# State# RealWorld, a #)
239 eqStablePtr# :: StablePtr# a -> StablePtr# a -> Int#
240 \end{verbatim}
241
242 It may seem a bit surprising that @makeStablePtr#@ is a @IO@
243 operation since it doesn't (directly) involve IO operations. The
244 reason is that if some optimisation pass decided to duplicate calls to
245 @makeStablePtr#@ and we only pass one of the stable pointers over, a
246 massive space leak can result. Putting it into the IO monad
247 prevents this. (Another reason for putting them in a monad is to
248 ensure correct sequencing wrt the side-effecting @freeStablePtr@
249 operation.)
250
251 An important property of stable pointers is that if you call
252 makeStablePtr# twice on the same object you get the same stable
253 pointer back.
254
255 Note that we can implement @freeStablePtr#@ using @_ccall_@ (and,
256 besides, it's not likely to be used from Haskell) so it's not a
257 primop.
258
259 Question: Why @RealWorld@ - won't any instance of @_ST@ do the job? [ADR]
260
261 Stable Names
262 ~~~~~~~~~~~~
263
264 A stable name is like a stable pointer, but with three important differences:
265
266 (a) You can't deRef one to get back to the original object.
267 (b) You can convert one to an Int.
268 (c) You don't need to 'freeStableName'
269
270 The existence of a stable name doesn't guarantee to keep the object it
271 points to alive (unlike a stable pointer), hence (a).
272
273 Invariants:
274
275 (a) makeStableName always returns the same value for a given
276 object (same as stable pointers).
277
278 (b) if two stable names are equal, it implies that the objects
279 from which they were created were the same.
280
281 (c) stableNameToInt always returns the same Int for a given
282 stable name.
283
284
285 These primops are pretty weird.
286
287 tagToEnum# :: Int -> a (result type must be an enumerated type)
288
289 The constraints aren't currently checked by the front end, but the
290 code generator will fall over if they aren't satisfied.
291
292 ************************************************************************
293 * *
294 Which PrimOps are out-of-line
295 * *
296 ************************************************************************
297
298 Some PrimOps need to be called out-of-line because they either need to
299 perform a heap check or they block.
300 -}
301
-- | True of primops that must be called out-of-line because they either
-- need to perform a heap check or they block (see section comment above).
primOpOutOfLine :: PrimOp -> Bool
#include "primop-out-of-line.hs-incl"
304
305 {-
306 ************************************************************************
307 * *
308 Failure and side effects
309 * *
310 ************************************************************************
311
312 Note [Checking versus non-checking primops]
313 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
314
315 In GHC primops break down into two classes:
316
317 a. Checking primops behave, for instance, like division. In this
318 case the primop may throw an exception (e.g. division-by-zero)
319 and is consequently marked with the can_fail flag described below.
320 The ability to fail comes at the expense of precluding some optimizations.
321
322 b. Non-checking primops behave, for instance, like addition. While
323 addition can overflow it does not produce an exception. So can_fail is
324 set to False, and we get more optimisation opportunities. But we must
325 never throw an exception, so we cannot rewrite to a call to error.
326
327 It is important that a non-checking primop never be transformed in a way that
328 would cause it to bottom. Doing so would violate Core's let/app invariant
329 (see Note [CoreSyn let/app invariant] in CoreSyn) which is critical to
330 the simplifier's ability to float without fear of changing program meaning.
331
332
333 Note [PrimOp can_fail and has_side_effects]
334 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
335 Both can_fail and has_side_effects mean that the primop has
336 some effect that is not captured entirely by its result value.
337
338 ---------- has_side_effects ---------------------
339 A primop "has_side_effects" if it has some *write* effect, visible
340 elsewhere
341 - writing to the world (I/O)
342 - writing to a mutable data structure (writeIORef)
343 - throwing a synchronous Haskell exception
344
345 Often such primops have a type like
346 State -> input -> (State, output)
347 so the state token guarantees ordering. In general we rely *only* on
348 data dependencies of the state token to enforce write-effect ordering
349
350 * NB1: if you inline unsafePerformIO, you may end up with
351 side-effecting ops whose 'state' output is discarded.
352 And programmers may do that by hand; see #9390.
353 That is why we (conservatively) do not discard write-effecting
354 primops even if both their state and result is discarded.
355
356 * NB2: We consider primops, such as raiseIO#, that can raise a
357 (Haskell) synchronous exception to "have_side_effects" but not
358 "can_fail". We must be careful about not discarding such things;
359 see the paper "A semantics for imprecise exceptions".
360
361 * NB3: *Read* effects (like reading an IORef) don't count here,
362 because it doesn't matter if we don't do them, or do them more than
363 once. *Sequencing* is maintained by the data dependency of the state
364 token.
365
366 ---------- can_fail ----------------------------
367 A primop "can_fail" if it can fail with an *unchecked* exception on
368 some elements of its input domain. Main examples:
369 division (fails on zero denominator)
370 array indexing (fails if the index is out of bounds)
371
372 An "unchecked exception" is one that is an outright error, (not
373 turned into a Haskell exception,) such as seg-fault or
374 divide-by-zero error. Such can_fail primops are ALWAYS surrounded
375 with a test that checks for the bad cases, but we need to be
376 very careful about code motion that might move it out of
377 the scope of the test.
378
379 Note [Transformations affected by can_fail and has_side_effects]
380 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
381 The can_fail and has_side_effects properties have the following effect
382 on program transformations. Summary table is followed by details.
383
384 can_fail has_side_effects
385 Discard YES NO
386 Float in YES YES
387 Float out NO NO
388 Duplicate YES NO
389
390 * Discarding. case (a `op` b) of _ -> rhs ===> rhs
391 You should not discard a has_side_effects primop; e.g.
392 case (writeIntArray# a i v s) of (# _, _ #) -> True
393 Arguably you should be able to discard this, since the
394 returned state token is not used, but that relies on NEVER
395 inlining unsafePerformIO, and programmers sometimes write
396 this kind of stuff by hand (#9390). So we (conservatively)
397 never discard a has_side_effects primop.
398
399 However, it's fine to discard a can_fail primop. For example
400 case (indexIntArray# a i) of _ -> True
401 We can discard indexIntArray#; it has can_fail, but not
402 has_side_effects; see #5658 which was all about this.
403 Notice that indexIntArray# is (in a more general handling of
404 effects) read effect, but we don't care about that here, and
405 treat read effects as *not* has_side_effects.
406
407 Similarly (a `/#` b) can be discarded. It can seg-fault or
408 cause a hardware exception, but not a synchronous Haskell
409 exception.
410
411
412
413 Synchronous Haskell exceptions, e.g. from raiseIO#, are treated
414 as has_side_effects and hence are not discarded.
415
416 * Float in. You can float a can_fail or has_side_effects primop
417 *inwards*, but not inside a lambda (see Duplication below).
418
419 * Float out. You must not float a can_fail primop *outwards* lest
420 you escape the dynamic scope of the test. Example:
421 case d ># 0# of
422 True -> case x /# d of r -> r +# 1
423 False -> 0
424 Here we must not float the case outwards to give
425 case x/# d of r ->
426 case d ># 0# of
427 True -> r +# 1
428 False -> 0
429
430 Nor can you float out a has_side_effects primop. For example:
431 if blah then case writeMutVar# v True s0 of (# s1 #) -> s1
432 else s0
433 Notice that s0 is mentioned in both branches of the 'if', but
434 only one of these two will actually be consumed. But if we
435 float out to
436 case writeMutVar# v True s0 of (# s1 #) ->
437 if blah then s1 else s0
438 the writeMutVar will be performed in both branches, which is
439 utterly wrong.
440
441 * Duplication. You cannot duplicate a has_side_effect primop. You
442 might wonder how this can occur given the state token threading, but
443 just look at Control.Monad.ST.Lazy.Imp.strictToLazy! We get
444 something like this
445 p = case readMutVar# s v of
446 (# s', r #) -> (S# s', r)
447 s' = case p of (s', r) -> s'
448 r = case p of (s', r) -> r
449
450 (All these bindings are boxed.) If we inline p at its two call
451 sites, we get a catastrophe: because the read is performed once when
452 s' is demanded, and once when 'r' is demanded, which may be much
453 later. Utterly wrong. #3207 is real example of this happening.
454
455 However, it's fine to duplicate a can_fail primop. That is really
456 the only difference between can_fail and has_side_effects.
457
458 Note [Implementation: how can_fail/has_side_effects affect transformations]
459 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
460 How do we ensure that floating/duplication/discarding are done right
461 in the simplifier?
462
463 Two main predicates on primops test these flags:
464 primOpOkForSideEffects <=> not has_side_effects
465 primOpOkForSpeculation <=> not (has_side_effects || can_fail)
466
467 * The "no-float-out" thing is achieved by ensuring that we never
468 let-bind a can_fail or has_side_effects primop. The RHS of a
469 let-binding (which can float in and out freely) satisfies
470 exprOkForSpeculation; this is the let/app invariant. And
471 exprOkForSpeculation is false of can_fail and has_side_effects.
472
473 * So can_fail and has_side_effects primops will appear only as the
474 scrutinees of cases, and that's why the FloatIn pass is capable
475 of floating case bindings inwards.
476
477 * The no-duplicate thing is done via primOpIsCheap, by making
478 has_side_effects things (very very very) not-cheap!
479 -}
480
-- | See Note [PrimOp can_fail and has_side_effects].
primOpHasSideEffects :: PrimOp -> Bool
#include "primop-has-side-effects.hs-incl"

-- | See Note [PrimOp can_fail and has_side_effects].
primOpCanFail :: PrimOp -> Bool
#include "primop-can-fail.hs-incl"
486
primOpOkForSpeculation :: PrimOp -> Bool
-- See Note [PrimOp can_fail and has_side_effects]
-- See comments with CoreUtils.exprOkForSpeculation
-- primOpOkForSpeculation => primOpOkForSideEffects
primOpOkForSpeculation op
  = primOpOkForSideEffects op
    && not (primOpOutOfLine op)
    && not (primOpCanFail op)
    -- The "out of line" test is here because out-of-line things can be
    -- expensive (eg sine, cosine), and so we may not want to speculate them.
496
-- | A primop is ok for side effects iff it has none.
-- See Note [PrimOp can_fail and has_side_effects].
primOpOkForSideEffects :: PrimOp -> Bool
primOpOkForSideEffects = not . primOpHasSideEffects
500
501 {-
502 Note [primOpIsCheap]
503 ~~~~~~~~~~~~~~~~~~~~
504 @primOpIsCheap@, as used in \tr{SimplUtils.hs}. For now (HACK
505 WARNING), we just borrow some other predicates for a
506 what-should-be-good-enough test. "Cheap" means willing to call it more
507 than once, and/or push it inside a lambda. The latter could change the
508 behaviour of 'seq' for primops that can fail, so we don't treat them as cheap.
509 -}
510
primOpIsCheap :: PrimOp -> Bool
-- See Note [primOpIsCheap] above, and
-- Note [PrimOp can_fail and has_side_effects].
primOpIsCheap = primOpOkForSpeculation
-- In March 2001 this was changed to
--      primOpIsCheap op = False
-- making *no* primop look cheap.  But that killed eta expansion on
--      case (x ==# y) of True -> \s -> ...
-- which is bad.  In particular a loop like
--      doLoop n = loop 0
--        where
--          loop i | i == n    = return ()
--                 | otherwise = bar i >> loop (i+1)
-- allocated a closure on every iteration because it didn't eta expand.
--
-- The problem that originally motivated the change was
--      let x = a +# b *# c in x +# x
-- where we don't want to inline x.  But primOpIsCheap doesn't control
-- that (exprIsDupable does), so the problem doesn't occur even if
-- primOpIsCheap sometimes says 'True'.
530
531 {-
532 ************************************************************************
533 * *
534 PrimOp code size
535 * *
536 ************************************************************************
537
538 primOpCodeSize
539 ~~~~~~~~~~~~~~
540 Gives an indication of the code size of a primop, for the purposes of
541 calculating unfolding sizes; see CoreUnfold.sizeExpr.
542 -}
543
-- | An indication of the code size of a primop, for the purposes of
-- calculating unfolding sizes; see CoreUnfold.sizeExpr.  Equations come
-- from a generated include file.
primOpCodeSize :: PrimOp -> Int
#include "primop-code-size.hs-incl"

-- | Size assigned to primops with no explicit code-size entry.
primOpCodeSizeDefault :: Int
primOpCodeSizeDefault = 1
-- CoreUnfold.primOpSize already takes into account primOpOutOfLine
-- and adds some further costs for the args in that case.

-- | Size assigned to primops that compile to a foreign call.
primOpCodeSizeForeignCall :: Int
primOpCodeSizeForeignCall = 4
554
555 {-
556 ************************************************************************
557 * *
558 PrimOp types
559 * *
560 ************************************************************************
561 -}
562
-- | The full type of a primop.  Most callers want 'primOpSig' instead,
-- which returns the type already split apart.
primOpType :: PrimOp -> Type
primOpType op =
  case primOpInfo op of
    Dyadic  _occ t -> dyadic_fun_ty  t
    Monadic _occ t -> monadic_fun_ty t
    Compare _occ t -> compare_fun_ty t
    GenPrimOp _occ tvs args res
      -> mkSpecForAllTys tvs (mkVisFunTys args res)
572
-- | The 'OccName' (base name) of a primop, taken from its 'PrimOpInfo'.
primOpOcc :: PrimOp -> OccName
primOpOcc op = case primOpInfo op of
                 Dyadic    n _     -> n
                 Monadic   n _     -> n
                 Compare   n _     -> n
                 GenPrimOp n _ _ _ -> n
579
580 {- Note [Primop wrappers]
581 ~~~~~~~~~~~~~~~~~~~~~~~~~
582 Previously hasNoBinding would claim that PrimOpIds didn't have a curried
583 function definition. This caused quite some trouble as we would be forced to
584 eta expand unsaturated primop applications very late in the Core pipeline. Not
585 only would this produce unnecessary thunks, but it would also result in nasty
586 inconsistencies in CAFfy-ness determinations (see #16846 and
587 Note [CAFfyness inconsistencies due to late eta expansion] in TidyPgm).
588
589 However, it was quite unnecessary for hasNoBinding to claim this; primops in
590 fact *do* have curried definitions which are found in GHC.PrimopWrappers, which
591 is auto-generated by utils/genprimops from prelude/primops.txt.pp. These wrappers
592 are standard Haskell functions mirroring the types of the primops they wrap.
593 For instance, in the case of plusInt# we would have:
594
595 module GHC.PrimopWrappers where
596 import GHC.Prim as P
597 plusInt# a b = P.plusInt# a b
598
599 We now take advantage of these curried definitions by letting hasNoBinding
600 claim that PrimOpIds have a curried definition and then rewrite any unsaturated
601 PrimOpId applications that we find during CoreToStg as applications of the
602 associated wrapper (e.g. `GHC.Prim.plusInt# 3#` will get rewritten to
603 `GHC.PrimopWrappers.plusInt# 3#`).` The Id of the wrapper for a primop can be
604 found using 'PrimOp.primOpWrapperId'.
605
606 Nota Bene: GHC.PrimopWrappers is needed *regardless*, because it's
607 used by GHCi, which does not implement primops directly at all.
608
609 -}
610
-- | Returns the 'Id' of the wrapper associated with the given 'PrimOp'.
-- See Note [Primop wrappers].
primOpWrapperId :: PrimOp -> Id
primOpWrapperId op = mkVanillaGlobalWithInfo name ty info
  where
    -- Wrappers are marked as having no CAF references.
    info = setCafInfo vanillaIdInfo NoCafRefs
    -- Same OccName as the primop itself, but living in GHC.PrimopWrappers.
    name = mkExternalName uniq gHC_PRIMOPWRAPPERS (primOpOcc op) wiredInSrcSpan
    uniq = mkPrimOpWrapperUnique (primOpTag op)
    -- The wrapper's type mirrors the primop's type exactly.
    ty   = primOpType op
620
-- | True of primops whose 'PrimOpInfo' is a 'Compare'.
isComparisonPrimOp :: PrimOp -> Bool
isComparisonPrimOp op
  | Compare {} <- primOpInfo op = True
  | otherwise                   = False
625
-- | Like 'primOpType', but with the result split apart:
-- (type variables, argument types, result type), together with the
-- arity and strictness info.
primOpSig :: PrimOp -> ([TyVar], [Type], Type, Arity, StrictSig)
primOpSig op = (tvs, args, res, n_args, primOpStrictness op n_args)
  where
    n_args = length args
    (tvs, args, res) =
      case primOpInfo op of
        Monadic   _occ t              -> ([],   [t],    t)
        Dyadic    _occ t              -> ([],   [t, t], t)
        Compare   _occ t              -> ([],   [t, t], intPrimTy)
        GenPrimOp _occ gtvs gargs gres -> (gtvs, gargs,  gres)
641
-- | How a primop returns its result to the code generator.
data PrimOpResultInfo
  = ReturnsPrim PrimRep   -- ^ a manifest primitive value
  | ReturnsAlg  TyCon     -- ^ a value of an algebraic type (which may be
                          --   an unboxed tuple or sum)

-- Some PrimOps need not return a manifest primitive or algebraic value
-- (i.e. they might return a polymorphic value).  These PrimOps *must*
-- be out of line, or the code generator won't work.
649
-- | Classify how the given primop returns its result;
-- see 'PrimOpResultInfo'.
getPrimOpResultInfo :: PrimOp -> PrimOpResultInfo
getPrimOpResultInfo op
  = case (primOpInfo op) of
      Dyadic  _ ty                        -> ReturnsPrim (typePrimRep1 ty)
      Monadic _ ty                        -> ReturnsPrim (typePrimRep1 ty)
      -- Compare ops always return an Int#, regardless of operand type.
      Compare _ _                         -> ReturnsPrim (tyConPrimRep1 intPrimTyCon)
      GenPrimOp _ _ _ ty | isPrimTyCon tc -> ReturnsPrim (tyConPrimRep1 tc)
                         | otherwise      -> ReturnsAlg tc
                         where
                           tc = tyConAppTyCon ty
        -- All primops return a tycon-app result
        -- The tycon can be an unboxed tuple or sum, though,
        -- which gives rise to a ReturnAlg
663
664 {-
665 We do not currently make use of whether primops are commutable.
666
667 We used to try to move constants to the right hand side for strength
668 reduction.
669 -}
670
671 {-
672 commutableOp :: PrimOp -> Bool
673 #include "primop-commutable.hs-incl"
674 -}
675
-- Utils: type builders for the three fixed primop shapes.

dyadic_fun_ty, monadic_fun_ty, compare_fun_ty :: Type -> Type
dyadic_fun_ty  t = mkVisFunTys [t, t] t
monadic_fun_ty t = mkVisFunTy  t t
compare_fun_ty t = mkVisFunTys [t, t] intPrimTy
682
683 -- Output stuff:
684
-- | Pretty-print a primop by its 'OccName'.
pprPrimOp :: PrimOp -> SDoc
pprPrimOp = pprOccName . primOpOcc
687
688 {-
689 ************************************************************************
690 * *
691 \subsubsection[PrimCall]{User-imported primitive calls}
692 * *
693 ************************************************************************
694 -}
695
-- | A user-imported primitive call: the label to call and the unit it
-- comes from.
data PrimCall = PrimCall CLabelString UnitId
697
instance Outputable PrimCall where
  ppr (PrimCall lbl uid)
    = text "__primcall" <+> ppr uid <+> ppr lbl