/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2001
 *
 * API for invoking Haskell functions via the RTS
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "OSThreads.h"
#include "RtsAPI.h"
#include "SchedAPI.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Prelude.h"
#include "Schedule.h"
#include "Capability.h"
#include "Stable.h"

#include <stdlib.h>

/* ----------------------------------------------------------------------------
   Building Haskell objects from C datatypes.

   TODO: Currently this code does not tag the pointers it creates.
   That is not unsafe (the constructor code will tag them on entry),
   merely inefficient.
   ------------------------------------------------------------------------- */
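
/* A minimal usage sketch, assuming the caller already holds a
   Capability obtained from rts_lock() (defined at the end of this
   file): each rts_mk* call heap-allocates a fully evaluated
   constructor application, so the matching rts_get* accessor defined
   further down can read the value straight back:

       HaskellObj boxed = rts_mkInt32(cap, 1234567);
       HsInt32    back  = rts_getInt32(boxed);   // == 1234567
*/
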
HaskellObj
rts_mkChar (Capability *cap, HsChar c)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, Czh_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)(StgWord)(StgChar)c;
    return p;
}

HaskellObj
rts_mkInt (Capability *cap, HsInt i)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, Izh_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)(StgInt)i;
    return p;
}

HaskellObj
rts_mkInt8 (Capability *cap, HsInt8 i)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, I8zh_con_info, CCS_SYSTEM);
    /* Make sure we mask out the bits above the lowest 8 */
    p->payload[0] = (StgClosure *)(StgInt)((unsigned)i & 0xff);
    return p;
}

HaskellObj
rts_mkInt16 (Capability *cap, HsInt16 i)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, I16zh_con_info, CCS_SYSTEM);
    /* Make sure we mask out the bits above the lowest 16 */
    p->payload[0] = (StgClosure *)(StgInt)((unsigned)i & 0xffff);
    return p;
}

HaskellObj
rts_mkInt32 (Capability *cap, HsInt32 i)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, I32zh_con_info, CCS_SYSTEM);
    /* Make sure we mask out the bits above the lowest 32 */
    p->payload[0] = (StgClosure *)(StgInt)((unsigned)i & 0xffffffff);
    return p;
}


#ifdef sparc_HOST_ARCH
/* The closures returned by allocateLocal are only guaranteed to be 32 bit
   aligned, because that's the size of pointers. SPARC v9 can't do
   misaligned loads/stores, so we have to write the 64bit word in chunks */

HaskellObj
rts_mkInt64 (Capability *cap, HsInt64 i_)
{
    StgInt64 i = (StgInt64)i_;
    StgInt32 *tmp;

    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,2));
    SET_HDR(p, I64zh_con_info, CCS_SYSTEM);

    tmp = (StgInt32*)&(p->payload[0]);

    tmp[0] = (StgInt32)(i >> 32);
    tmp[1] = (StgInt32)i;  /* truncate high 32 bits */

    return p;
}

#else

HaskellObj
rts_mkInt64 (Capability *cap, HsInt64 i)
{
    llong *tmp;
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,2));
    SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
    tmp = (llong*)&(p->payload[0]);
    *tmp = (StgInt64)i;
    return p;
}

#endif /* sparc_HOST_ARCH */
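
/* Worked example of the split above: for i = 0x0123456789ABCDEF the
   first store writes tmp[0] = 0x01234567 (the high word, i >> 32) and
   the second writes tmp[1] = 0x89ABCDEF (the low word, kept by the
   truncating cast).  With the high word at the lower address, the two
   aligned 32-bit stores lay down the value's big-endian
   representation, which is what SPARC expects. */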


HaskellObj
rts_mkWord (Capability *cap, HsWord i)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, Wzh_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)(StgWord)i;
    return p;
}

HaskellObj
rts_mkWord8 (Capability *cap, HsWord8 w)
{
    /* see rts_mkInt* comments */
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, W8zh_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)(StgWord)(w & 0xff);
    return p;
}

HaskellObj
rts_mkWord16 (Capability *cap, HsWord16 w)
{
    /* see rts_mkInt* comments */
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, W16zh_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)(StgWord)(w & 0xffff);
    return p;
}

HaskellObj
rts_mkWord32 (Capability *cap, HsWord32 w)
{
    /* see rts_mkInt* comments */
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, W32zh_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)(StgWord)(w & 0xffffffff);
    return p;
}


#ifdef sparc_HOST_ARCH
/* The closures returned by allocateLocal are only guaranteed to be 32 bit
   aligned, because that's the size of pointers. SPARC v9 can't do
   misaligned loads/stores, so we have to write the 64bit word in chunks */

HaskellObj
rts_mkWord64 (Capability *cap, HsWord64 w_)
{
    StgWord64 w = (StgWord64)w_;
    StgWord32 *tmp;

    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,2));
    /* see rts_mkInt8 comment */
    SET_HDR(p, W64zh_con_info, CCS_SYSTEM);

    tmp = (StgWord32*)&(p->payload[0]);

    tmp[0] = (StgWord32)(w >> 32);
    tmp[1] = (StgWord32)w;  /* truncate high 32 bits */
    return p;
}

#else

HaskellObj
rts_mkWord64 (Capability *cap, HsWord64 w)
{
    ullong *tmp;

    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,2));
    /* see rts_mkInt8 comment */
    SET_HDR(p, W64zh_con_info, CCS_SYSTEM);
    tmp = (ullong*)&(p->payload[0]);
    *tmp = (StgWord64)w;
    return p;
}

#endif /* sparc_HOST_ARCH */


HaskellObj
rts_mkFloat (Capability *cap, HsFloat f)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, Fzh_con_info, CCS_SYSTEM);
    ASSIGN_FLT((P_)p->payload, (StgFloat)f);
    return p;
}

HaskellObj
rts_mkDouble (Capability *cap, HsDouble d)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,sizeofW(StgDouble)));
    SET_HDR(p, Dzh_con_info, CCS_SYSTEM);
    ASSIGN_DBL((P_)p->payload, (StgDouble)d);
    return p;
}

HaskellObj
rts_mkStablePtr (Capability *cap, HsStablePtr s)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap, sizeofW(StgHeader)+1);
    SET_HDR(p, StablePtr_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)s;
    return p;
}

HaskellObj
rts_mkPtr (Capability *cap, HsPtr a)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap, sizeofW(StgHeader)+1);
    SET_HDR(p, Ptr_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)a;
    return p;
}

HaskellObj
rts_mkFunPtr (Capability *cap, HsFunPtr a)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap, sizeofW(StgHeader)+1);
    SET_HDR(p, FunPtr_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)a;
    return p;
}

HaskellObj
rts_mkBool (Capability *cap STG_UNUSED, HsBool b)
{
    if (b) {
        return (StgClosure *)True_closure;
    } else {
        return (StgClosure *)False_closure;
    }
}

HaskellObj
rts_mkString (Capability *cap, char *s)
{
    return rts_apply(cap, (StgClosure *)unpackCString_closure, rts_mkPtr(cap,s));
}

HaskellObj
rts_apply (Capability *cap, HaskellObj f, HaskellObj arg)
{
    StgThunk *ap;

    ap = (StgThunk *)allocateLocal(cap, sizeofW(StgThunk) + 2);
    SET_HDR(ap, (StgInfoTable *)&stg_ap_2_upd_info, CCS_SYSTEM);
    ap->payload[0] = f;
    ap->payload[1] = arg;
    return (StgClosure *)ap;
}
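
/* rts_apply builds a single binary application thunk, so applying a
   curried function to several arguments means nesting calls.  A sketch,
   where 'plus_closure' stands for a hypothetical exported closure of
   type Int -> Int -> Int:

       HaskellObj app = rts_apply(cap,
                           rts_apply(cap, plus_closure, rts_mkInt(cap, 1)),
                           rts_mkInt(cap, 2));
       // 'app' is an unevaluated thunk; hand it to rts_eval (below)
       // to reduce it and obtain the boxed result.
*/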

/* ----------------------------------------------------------------------------
   Deconstructing Haskell objects

   We would like to assert that we have the right kind of object in
   each case, but this is problematic because in GHCi the info table
   for the D# constructor (say) might be dynamically loaded.  Hence we
   omit these assertions for now.
   ------------------------------------------------------------------------- */
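
/* Note that these accessors perform no evaluation: the argument must
   already be a constructor application in WHNF.  Values built with the
   rts_mk* constructors above qualify directly; thunks (e.g. the results
   of rts_apply or rts_mkString) must first go through rts_eval, as in:

       HaskellObj boxed;
       cap = rts_eval(cap, app, &boxed);   // 'app' as sketched above
       HsInt n = rts_getInt(boxed);
*/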

HsChar
rts_getChar (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Czh_con_info ||
    //        p->header.info == Czh_static_info);
    return (StgChar)(StgWord)(UNTAG_CLOSURE(p)->payload[0]);
}

HsInt
rts_getInt (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Izh_con_info ||
    //        p->header.info == Izh_static_info);
    return (HsInt)(UNTAG_CLOSURE(p)->payload[0]);
}

HsInt8
rts_getInt8 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == I8zh_con_info ||
    //        p->header.info == I8zh_static_info);
    return (HsInt8)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
}

HsInt16
rts_getInt16 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == I16zh_con_info ||
    //        p->header.info == I16zh_static_info);
    return (HsInt16)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
}

HsInt32
rts_getInt32 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == I32zh_con_info ||
    //        p->header.info == I32zh_static_info);
    return (HsInt32)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
}


#ifdef sparc_HOST_ARCH
/* The closures returned by allocateLocal are only guaranteed to be 32 bit
   aligned, because that's the size of pointers. SPARC v9 can't do
   misaligned loads/stores, so we have to read the 64bit word in chunks */

HsInt64
rts_getInt64 (HaskellObj p)
{
    HsInt32* tmp;
    // See comment above:
    // ASSERT(p->header.info == I64zh_con_info ||
    //        p->header.info == I64zh_static_info);
    tmp = (HsInt32*)&(UNTAG_CLOSURE(p)->payload[0]);

    /* Cast the low word via HsWord32 so that it is zero-extended; a
       plain (HsInt64) cast would sign-extend and corrupt the high word. */
    HsInt64 i = ((HsInt64)tmp[0] << 32) | (HsInt64)(HsWord32)tmp[1];
    return i;
}

#else

HsInt64
rts_getInt64 (HaskellObj p)
{
    HsInt64* tmp;
    // See comment above:
    // ASSERT(p->header.info == I64zh_con_info ||
    //        p->header.info == I64zh_static_info);
    tmp = (HsInt64*)&(UNTAG_CLOSURE(p)->payload[0]);
    return *tmp;
}

#endif /* sparc_HOST_ARCH */


HsWord
rts_getWord (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Wzh_con_info ||
    //        p->header.info == Wzh_static_info);
    return (HsWord)(UNTAG_CLOSURE(p)->payload[0]);
}

HsWord8
rts_getWord8 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == W8zh_con_info ||
    //        p->header.info == W8zh_static_info);
    return (HsWord8)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
}

HsWord16
rts_getWord16 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == W16zh_con_info ||
    //        p->header.info == W16zh_static_info);
    return (HsWord16)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
}

HsWord32
rts_getWord32 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == W32zh_con_info ||
    //        p->header.info == W32zh_static_info);
    return (HsWord32)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
}


#ifdef sparc_HOST_ARCH
/* The closures returned by allocateLocal are only guaranteed to be 32 bit
   aligned, because that's the size of pointers. SPARC v9 can't do
   misaligned loads/stores, so we have to read the 64bit word in chunks */

HsWord64
rts_getWord64 (HaskellObj p)
{
    HsWord32* tmp;
    // See comment above:
    // ASSERT(p->header.info == W64zh_con_info ||
    //        p->header.info == W64zh_static_info);
    tmp = (HsWord32*)&(UNTAG_CLOSURE(p)->payload[0]);

    /* tmp must point at unsigned words so the low word zero-extends */
    HsWord64 w = ((HsWord64)tmp[0] << 32) | (HsWord64)tmp[1];
    return w;
}

#else

HsWord64
rts_getWord64 (HaskellObj p)
{
    HsWord64* tmp;
    // See comment above:
    // ASSERT(p->header.info == W64zh_con_info ||
    //        p->header.info == W64zh_static_info);
    tmp = (HsWord64*)&(UNTAG_CLOSURE(p)->payload[0]);
    return *tmp;
}

#endif /* sparc_HOST_ARCH */


HsFloat
rts_getFloat (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Fzh_con_info ||
    //        p->header.info == Fzh_static_info);
    return (float)(PK_FLT((P_)UNTAG_CLOSURE(p)->payload));
}

HsDouble
rts_getDouble (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Dzh_con_info ||
    //        p->header.info == Dzh_static_info);
    return (double)(PK_DBL((P_)UNTAG_CLOSURE(p)->payload));
}

HsStablePtr
rts_getStablePtr (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == StablePtr_con_info ||
    //        p->header.info == StablePtr_static_info);
    return (StgStablePtr)(UNTAG_CLOSURE(p)->payload[0]);
}

HsPtr
rts_getPtr (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Ptr_con_info ||
    //        p->header.info == Ptr_static_info);
    return (void *)(UNTAG_CLOSURE(p)->payload[0]);
}

HsFunPtr
rts_getFunPtr (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == FunPtr_con_info ||
    //        p->header.info == FunPtr_static_info);
    return (void *)(UNTAG_CLOSURE(p)->payload[0]);
}

HsBool
rts_getBool (HaskellObj p)
{
    StgInfoTable *info;

    info = get_itbl((StgClosure *)UNTAG_CLOSURE(p));
    if (info->srt_bitmap == 0) { // srt_bitmap is the constructor tag
        return 0;
    } else {
        return 1;
    }
}

/* -----------------------------------------------------------------------------
   Creating threads
   -------------------------------------------------------------------------- */

INLINE_HEADER void pushClosure (StgTSO *tso, StgWord c) {
    tso->sp--;
    tso->sp[0] = (W_) c;
}

StgTSO *
createGenThread (Capability *cap, nat stack_size, StgClosure *closure)
{
    StgTSO *t;
    t = createThread (cap, stack_size);
    pushClosure(t, (W_)closure);
    pushClosure(t, (W_)&stg_enter_info);
    return t;
}

StgTSO *
createIOThread (Capability *cap, nat stack_size, StgClosure *closure)
{
    StgTSO *t;
    t = createThread (cap, stack_size);
    pushClosure(t, (W_)&stg_noforceIO_info);
    pushClosure(t, (W_)&stg_ap_v_info);
    pushClosure(t, (W_)closure);
    pushClosure(t, (W_)&stg_enter_info);
    return t;
}

/*
 * Same as above, but also evaluate the result of the IO action
 * to WHNF while we're at it.
 */

StgTSO *
createStrictIOThread(Capability *cap, nat stack_size, StgClosure *closure)
{
    StgTSO *t;
    t = createThread(cap, stack_size);
    pushClosure(t, (W_)&stg_forceIO_info);
    pushClosure(t, (W_)&stg_ap_v_info);
    pushClosure(t, (W_)closure);
    pushClosure(t, (W_)&stg_enter_info);
    return t;
}
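
/* The closures are pushed in reverse execution order, because
   pushClosure grows the stack downwards.  For createStrictIOThread the
   new TSO's stack therefore reads, from the top:

       stg_enter_info    -- enter 'closure', the IO action
       closure
       stg_ap_v_info     -- apply it to the void (RealWorld) argument
       stg_forceIO_info  -- finally force the action's result to WHNF

   createIOThread is identical except that stg_noforceIO_info returns
   the result without forcing it. */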

/* ----------------------------------------------------------------------------
   Evaluating Haskell expressions
   ------------------------------------------------------------------------- */

Capability *
rts_eval (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
{
    StgTSO *tso;

    tso = createGenThread(cap, RtsFlags.GcFlags.initialStkSize, p);
    return scheduleWaitThread(tso,ret,cap);
}

Capability *
rts_eval_ (Capability *cap, HaskellObj p, unsigned int stack_size,
           /*out*/HaskellObj *ret)
{
    StgTSO *tso;

    tso = createGenThread(cap, stack_size, p);
    return scheduleWaitThread(tso,ret,cap);
}

/*
 * rts_evalIO() evaluates a value of the form (IO a), forcing the action's
 * result to WHNF before returning.
 */
Capability *
rts_evalIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
{
    StgTSO* tso;

    tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
    return scheduleWaitThread(tso,ret,cap);
}
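
/* The canonical calling sequence from C, sketched here with a
   hypothetical exported closure 'my_action_closure' of type (IO a);
   every other name is defined in this file:

       Capability *cap = rts_lock();
       HaskellObj ret;
       cap = rts_evalIO(cap, my_action_closure, &ret);
       rts_checkSchedStatus("my_action", cap);   // exits on failure
       // ... inspect 'ret' with the rts_get* accessors ...
       rts_unlock(cap);

   rts_evalIO() may return a different Capability from the one passed
   in, so 'cap' must be threaded through as shown. */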

/*
 * rts_evalStableIO() is suitable for calling from Haskell.  It
 * evaluates a value of the form (StablePtr (IO a)), forcing the
 * action's result to WHNF before returning.  The result is returned
 * in a StablePtr.
 */
Capability *
rts_evalStableIO (Capability *cap, HsStablePtr s, /*out*/HsStablePtr *ret)
{
    StgTSO* tso;
    StgClosure *p, *r;
    SchedulerStatus stat;

    p = (StgClosure *)deRefStablePtr(s);
    tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
    // async exceptions are always blocked by default in the created
    // thread.  See #1048.
    tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
    cap = scheduleWaitThread(tso,&r,cap);
    stat = rts_getSchedStatus(cap);

    if (stat == Success && ret != NULL) {
        ASSERT(r != NULL);
        *ret = getStablePtr((StgPtr)r);
    }

    return cap;
}
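
/* A sketch of driving rts_evalStableIO from C, assuming 'action' is a
   stable pointer to a closure of type (IO a), obtained earlier via
   getStablePtr (or handed across from Haskell code):

       HsStablePtr result;
       cap = rts_evalStableIO(cap, action, &result);
       if (rts_getSchedStatus(cap) == Success) {
           StgClosure *r = (StgClosure *)deRefStablePtr(result);
           // ... use r, then freeStablePtr(result) when finished ...
       }
*/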

/*
 * Like rts_evalIO(), but doesn't force the action's result.
 */
Capability *
rts_evalLazyIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
{
    StgTSO *tso;

    tso = createIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
    return scheduleWaitThread(tso,ret,cap);
}

Capability *
rts_evalLazyIO_ (Capability *cap, HaskellObj p, unsigned int stack_size,
                 /*out*/HaskellObj *ret)
{
    StgTSO *tso;

    tso = createIOThread(cap, stack_size, p);
    return scheduleWaitThread(tso,ret,cap);
}

/* Convenience function for decoding the returned status. */

void
rts_checkSchedStatus (char* site, Capability *cap)
{
    SchedulerStatus rc = cap->running_task->stat;
    switch (rc) {
    case Success:
        return;
    case Killed:
        errorBelch("%s: uncaught exception",site);
        stg_exit(EXIT_FAILURE);
    case Interrupted:
        errorBelch("%s: interrupted", site);
        stg_exit(EXIT_FAILURE);
    default:
        errorBelch("%s: Return code (%d) not ok",(site),(rc));
        stg_exit(EXIT_FAILURE);
    }
}

SchedulerStatus
rts_getSchedStatus (Capability *cap)
{
    return cap->running_task->stat;
}

Capability *
rts_lock (void)
{
    Capability *cap;
    Task *task;

    task = newBoundTask();

    cap = NULL;
    waitForReturnCapability(&cap, task);
    return cap;
}

// Exiting the RTS: we hold a Capability that is not necessarily the
// same one that was originally returned by rts_lock(), because
// rts_evalIO() etc. may return a new one.  Now that we have finished
// inspecting the return value, we can release the Capability, and
// free the Task (in that order).

void
rts_unlock (Capability *cap)
{
    Task *task;

    task = cap->running_task;
    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    // Now release the Capability.  With the capability released, GC
    // may happen.  NB. does not try to put the current Task on the
    // worker queue.
    // NB. keep cap->lock held while we call boundTaskExiting().  This
    // is necessary during shutdown, where we want the invariant that
    // after shutdownCapability(), all the Tasks associated with the
    // Capability have completed their shutdown too.  Otherwise we
    // could have boundTaskExiting()/workerTaskStop() running at some
    // random point in the future, which causes problems for
    // freeTaskManager().
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap,rtsFalse);

    // Finally, we can release the Task to the free list.
    boundTaskExiting(task);
    RELEASE_LOCK(&cap->lock);
}