SPARC NCG: Add a SPARC version of rts_mkInt64 that handles misaligned closure payloads.
[ghc.git] / rts / RtsAPI.c
1 /* ----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2001
4 *
5 * API for invoking Haskell functions via the RTS
6 *
7 * --------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11 #include "OSThreads.h"
12 #include "RtsAPI.h"
13 #include "SchedAPI.h"
14 #include "RtsFlags.h"
15 #include "RtsUtils.h"
16 #include "Prelude.h"
17 #include "Schedule.h"
18 #include "Capability.h"
19 #include "Stable.h"
20
21 #include <stdlib.h>
22
23 /* ----------------------------------------------------------------------------
24 Building Haskell objects from C datatypes.
25
26 TODO: Currently this code does not tag created pointers,
   however it is not unsafe (the constructor code will do it)
28 just inefficient.
29 ------------------------------------------------------------------------- */
30 HaskellObj
31 rts_mkChar (Capability *cap, HsChar c)
32 {
33 StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
34 SET_HDR(p, Czh_con_info, CCS_SYSTEM);
35 p->payload[0] = (StgClosure *)(StgWord)(StgChar)c;
36 return p;
37 }
38
39 HaskellObj
40 rts_mkInt (Capability *cap, HsInt i)
41 {
42 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
43 SET_HDR(p, Izh_con_info, CCS_SYSTEM);
44 p->payload[0] = (StgClosure *)(StgInt)i;
45 return p;
46 }
47
/* Box an HsInt8 as an I8zh closure allocated on cap's local heap. */
HaskellObj
rts_mkInt8 (Capability *cap, HsInt8 i)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
    SET_HDR(p, I8zh_con_info, CCS_SYSTEM);
    /* No explicit masking is needed here: i is already an 8-bit value,
       and the (StgInt) cast sign-extends it into a full payload word. */
    p->payload[0] = (StgClosure *)(StgInt)i;
    return p;
}
57
/* Box an HsInt16 as an I16zh closure allocated on cap's local heap. */
HaskellObj
rts_mkInt16 (Capability *cap, HsInt16 i)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
    SET_HDR(p, I16zh_con_info, CCS_SYSTEM);
    /* No explicit masking is needed here: i is already a 16-bit value,
       and the (StgInt) cast sign-extends it into a full payload word. */
    p->payload[0] = (StgClosure *)(StgInt)i;
    return p;
}
67
68 HaskellObj
69 rts_mkInt32 (Capability *cap, HsInt32 i)
70 {
71 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
72 SET_HDR(p, I32zh_con_info, CCS_SYSTEM);
73 p->payload[0] = (StgClosure *)(StgInt)i;
74 return p;
75 }
76
77
#ifdef sparc_HOST_ARCH
/* The closures returned by allocateLocal are only guaranteed to be 32 bit
   aligned, because that's the size of pointers.  SPARC v9 can't do
   misaligned loads/stores, so we have to write the 64bit word in chunks */

/* Box an HsInt64 as an I64zh closure, storing the payload as two 32-bit
   halves so the store works even when the payload is only 4-byte aligned.
   The high word goes in slot 0, which matches SPARC's big-endian layout. */
HaskellObj
rts_mkInt64 (Capability *cap, HsInt64 i_)
{
    StgInt64 i = (StgInt64)i_;
    StgInt32 *tmp;

    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
    SET_HDR(p, I64zh_con_info, CCS_SYSTEM);

    /* View the payload as two 32-bit slots instead of one 64-bit slot. */
    tmp = (StgInt32*)&(p->payload[0]);

    tmp[0] = (StgInt32)((StgInt64)i >> 32);   /* high 32 bits */
    tmp[1] = (StgInt32)i;  /* truncate high 32 bits */

    return p;
}

#else

/* Box an HsInt64 as an I64zh closure with a single 64-bit store
   (fine on platforms without the SPARC alignment restriction). */
HaskellObj
rts_mkInt64 (Capability *cap, HsInt64 i)
{
    llong *tmp;
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
    SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
    tmp = (llong*)&(p->payload[0]);
    *tmp = (StgInt64)i;
    return p;
}

#endif /* sparc_HOST_ARCH */
114
115
116 HaskellObj
117 rts_mkWord (Capability *cap, HsWord i)
118 {
119 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
120 SET_HDR(p, Wzh_con_info, CCS_SYSTEM);
121 p->payload[0] = (StgClosure *)(StgWord)i;
122 return p;
123 }
124
125 HaskellObj
126 rts_mkWord8 (Capability *cap, HsWord8 w)
127 {
128 /* see rts_mkInt* comments */
129 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
130 SET_HDR(p, W8zh_con_info, CCS_SYSTEM);
131 p->payload[0] = (StgClosure *)(StgWord)(w & 0xff);
132 return p;
133 }
134
135 HaskellObj
136 rts_mkWord16 (Capability *cap, HsWord16 w)
137 {
138 /* see rts_mkInt* comments */
139 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
140 SET_HDR(p, W16zh_con_info, CCS_SYSTEM);
141 p->payload[0] = (StgClosure *)(StgWord)(w & 0xffff);
142 return p;
143 }
144
145 HaskellObj
146 rts_mkWord32 (Capability *cap, HsWord32 w)
147 {
148 /* see rts_mkInt* comments */
149 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
150 SET_HDR(p, W32zh_con_info, CCS_SYSTEM);
151 p->payload[0] = (StgClosure *)(StgWord)(w & 0xffffffff);
152 return p;
153 }
154
155 HaskellObj
156 rts_mkWord64 (Capability *cap, HsWord64 w)
157 {
158 ullong *tmp;
159
160 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
161 /* see mk_Int8 comment */
162 SET_HDR(p, W64zh_con_info, CCS_SYSTEM);
163 tmp = (ullong*)&(p->payload[0]);
164 *tmp = (StgWord64)w;
165 return p;
166 }
167
168 HaskellObj
169 rts_mkFloat (Capability *cap, HsFloat f)
170 {
171 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
172 SET_HDR(p, Fzh_con_info, CCS_SYSTEM);
173 ASSIGN_FLT((P_)p->payload, (StgFloat)f);
174 return p;
175 }
176
177 HaskellObj
178 rts_mkDouble (Capability *cap, HsDouble d)
179 {
180 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,sizeofW(StgDouble)));
181 SET_HDR(p, Dzh_con_info, CCS_SYSTEM);
182 ASSIGN_DBL((P_)p->payload, (StgDouble)d);
183 return p;
184 }
185
186 HaskellObj
187 rts_mkStablePtr (Capability *cap, HsStablePtr s)
188 {
189 StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
190 SET_HDR(p, StablePtr_con_info, CCS_SYSTEM);
191 p->payload[0] = (StgClosure *)s;
192 return p;
193 }
194
195 HaskellObj
196 rts_mkPtr (Capability *cap, HsPtr a)
197 {
198 StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
199 SET_HDR(p, Ptr_con_info, CCS_SYSTEM);
200 p->payload[0] = (StgClosure *)a;
201 return p;
202 }
203
204 HaskellObj
205 rts_mkFunPtr (Capability *cap, HsFunPtr a)
206 {
207 StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
208 SET_HDR(p, FunPtr_con_info, CCS_SYSTEM);
209 p->payload[0] = (StgClosure *)a;
210 return p;
211 }
212
213 HaskellObj
214 rts_mkBool (Capability *cap STG_UNUSED, HsBool b)
215 {
216 if (b) {
217 return (StgClosure *)True_closure;
218 } else {
219 return (StgClosure *)False_closure;
220 }
221 }
222
223 HaskellObj
224 rts_mkString (Capability *cap, char *s)
225 {
226 return rts_apply(cap, (StgClosure *)unpackCString_closure, rts_mkPtr(cap,s));
227 }
228
229 HaskellObj
230 rts_apply (Capability *cap, HaskellObj f, HaskellObj arg)
231 {
232 StgThunk *ap;
233
234 ap = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk) + 2);
235 SET_HDR(ap, (StgInfoTable *)&stg_ap_2_upd_info, CCS_SYSTEM);
236 ap->payload[0] = f;
237 ap->payload[1] = arg;
238 return (StgClosure *)ap;
239 }
240
241 /* ----------------------------------------------------------------------------
242 Deconstructing Haskell objects
243
244 We would like to assert that we have the right kind of object in
245 each case, but this is problematic because in GHCi the info table
246 for the D# constructor (say) might be dynamically loaded. Hence we
247 omit these assertions for now.
248 ------------------------------------------------------------------------- */
249
250 HsChar
251 rts_getChar (HaskellObj p)
252 {
253 // See comment above:
254 // ASSERT(p->header.info == Czh_con_info ||
255 // p->header.info == Czh_static_info);
256 return (StgChar)(StgWord)(UNTAG_CLOSURE(p)->payload[0]);
257 }
258
259 HsInt
260 rts_getInt (HaskellObj p)
261 {
262 // See comment above:
263 // ASSERT(p->header.info == Izh_con_info ||
264 // p->header.info == Izh_static_info);
265 return (HsInt)(UNTAG_CLOSURE(p)->payload[0]);
266 }
267
268 HsInt8
269 rts_getInt8 (HaskellObj p)
270 {
271 // See comment above:
272 // ASSERT(p->header.info == I8zh_con_info ||
273 // p->header.info == I8zh_static_info);
274 return (HsInt8)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
275 }
276
277 HsInt16
278 rts_getInt16 (HaskellObj p)
279 {
280 // See comment above:
281 // ASSERT(p->header.info == I16zh_con_info ||
282 // p->header.info == I16zh_static_info);
283 return (HsInt16)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
284 }
285
286 HsInt32
287 rts_getInt32 (HaskellObj p)
288 {
289 // See comment above:
290 // ASSERT(p->header.info == I32zh_con_info ||
291 // p->header.info == I32zh_static_info);
292 return (HsInt32)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
293 }
294
/* Read back the 64-bit payload of an I64zh closure. */
HsInt64
rts_getInt64 (HaskellObj p)
{
    HsInt64* tmp;
    // See comment above:
    // ASSERT(p->header.info == I64zh_con_info ||
    //        p->header.info == I64zh_static_info);
    // NOTE(review): this performs a single 64-bit load, but rts_mkInt64
    // on SPARC writes the payload in 32-bit chunks precisely because the
    // closure may only be 32-bit aligned -- confirm this load is safe on
    // that platform.
    tmp = (HsInt64*)&(UNTAG_CLOSURE(p)->payload[0]);
    return *tmp;
}
305 HsWord
306 rts_getWord (HaskellObj p)
307 {
308 // See comment above:
309 // ASSERT(p->header.info == Wzh_con_info ||
310 // p->header.info == Wzh_static_info);
311 return (HsWord)(UNTAG_CLOSURE(p)->payload[0]);
312 }
313
314 HsWord8
315 rts_getWord8 (HaskellObj p)
316 {
317 // See comment above:
318 // ASSERT(p->header.info == W8zh_con_info ||
319 // p->header.info == W8zh_static_info);
320 return (HsWord8)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
321 }
322
323 HsWord16
324 rts_getWord16 (HaskellObj p)
325 {
326 // See comment above:
327 // ASSERT(p->header.info == W16zh_con_info ||
328 // p->header.info == W16zh_static_info);
329 return (HsWord16)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
330 }
331
332 HsWord32
333 rts_getWord32 (HaskellObj p)
334 {
335 // See comment above:
336 // ASSERT(p->header.info == W32zh_con_info ||
337 // p->header.info == W32zh_static_info);
338 return (HsWord32)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
339 }
340
341
/* Read back the 64-bit payload of a W64zh closure. */
HsWord64
rts_getWord64 (HaskellObj p)
{
    HsWord64* tmp;
    // See comment above:
    // ASSERT(p->header.info == W64zh_con_info ||
    //        p->header.info == W64zh_static_info);
    // NOTE(review): single 64-bit load -- same SPARC alignment caveat as
    // rts_getInt64 above; confirm the payload alignment on that platform.
    tmp = (HsWord64*)&(UNTAG_CLOSURE(p)->payload[0]);
    return *tmp;
}
352
353 HsFloat
354 rts_getFloat (HaskellObj p)
355 {
356 // See comment above:
357 // ASSERT(p->header.info == Fzh_con_info ||
358 // p->header.info == Fzh_static_info);
359 return (float)(PK_FLT((P_)UNTAG_CLOSURE(p)->payload));
360 }
361
362 HsDouble
363 rts_getDouble (HaskellObj p)
364 {
365 // See comment above:
366 // ASSERT(p->header.info == Dzh_con_info ||
367 // p->header.info == Dzh_static_info);
368 return (double)(PK_DBL((P_)UNTAG_CLOSURE(p)->payload));
369 }
370
371 HsStablePtr
372 rts_getStablePtr (HaskellObj p)
373 {
374 // See comment above:
375 // ASSERT(p->header.info == StablePtr_con_info ||
376 // p->header.info == StablePtr_static_info);
377 return (StgStablePtr)(UNTAG_CLOSURE(p)->payload[0]);
378 }
379
380 HsPtr
381 rts_getPtr (HaskellObj p)
382 {
383 // See comment above:
384 // ASSERT(p->header.info == Ptr_con_info ||
385 // p->header.info == Ptr_static_info);
386 return (Capability *)(UNTAG_CLOSURE(p)->payload[0]);
387 }
388
389 HsFunPtr
390 rts_getFunPtr (HaskellObj p)
391 {
392 // See comment above:
393 // ASSERT(p->header.info == FunPtr_con_info ||
394 // p->header.info == FunPtr_static_info);
395 return (void *)(UNTAG_CLOSURE(p)->payload[0]);
396 }
397
/* Convert a Bool closure to an HsBool by reading the constructor tag
   out of its info table: tag 0 is False, anything else is True. */
HsBool
rts_getBool (HaskellObj p)
{
    StgInfoTable *info;

    info = get_itbl((StgClosure *)UNTAG_CLOSURE(p));
    if (info->srt_bitmap == 0) { // srt_bitmap is the constructor tag
        return 0;
    } else {
        return 1;
    }
}
410
411 /* -----------------------------------------------------------------------------
412 Creating threads
413 -------------------------------------------------------------------------- */
414
415 INLINE_HEADER void pushClosure (StgTSO *tso, StgWord c) {
416 tso->sp--;
417 tso->sp[0] = (W_) c;
418 }
419
/* Create a thread that will evaluate `closure` when scheduled.
   The stack grows downwards, so the closure is pushed first and the
   stg_enter frame ends up on top: the new thread starts by entering
   the closure. */
StgTSO *
createGenThread (Capability *cap, nat stack_size, StgClosure *closure)
{
    StgTSO *t;
#if defined(GRAN)
    t = createThread (cap, stack_size, NO_PRI);  /* GranSim needs a priority */
#else
    t = createThread (cap, stack_size);
#endif
    pushClosure(t, (W_)closure);
    pushClosure(t, (W_)&stg_enter_info);
    return t;
}
433
/* Create a thread that runs the IO action `closure` without forcing
   its result.  Frames are pushed in reverse execution order (stack
   grows downwards): the thread enters the closure, applies it via
   stg_ap_v, and finally returns through the stg_noforceIO frame. */
StgTSO *
createIOThread (Capability *cap, nat stack_size, StgClosure *closure)
{
    StgTSO *t;
#if defined(GRAN)
    t = createThread (cap, stack_size, NO_PRI);  /* GranSim needs a priority */
#else
    t = createThread (cap, stack_size);
#endif
    pushClosure(t, (W_)&stg_noforceIO_info);
    pushClosure(t, (W_)&stg_ap_v_info);
    pushClosure(t, (W_)closure);
    pushClosure(t, (W_)&stg_enter_info);
    return t;
}
449
450 /*
451 * Same as above, but also evaluate the result of the IO action
452 * to whnf while we're at it.
453 */
454
/* Same as createIOThread, but the bottom frame is stg_forceIO instead
   of stg_noforceIO, so the IO action's result is additionally
   evaluated to WHNF before the thread finishes. */
StgTSO *
createStrictIOThread(Capability *cap, nat stack_size, StgClosure *closure)
{
    StgTSO *t;
#if defined(GRAN)
    t = createThread(cap, stack_size, NO_PRI);  /* GranSim needs a priority */
#else
    t = createThread(cap, stack_size);
#endif
    pushClosure(t, (W_)&stg_forceIO_info);
    pushClosure(t, (W_)&stg_ap_v_info);
    pushClosure(t, (W_)closure);
    pushClosure(t, (W_)&stg_enter_info);
    return t;
}
470
471 /* ----------------------------------------------------------------------------
472 Evaluating Haskell expressions
473 ------------------------------------------------------------------------- */
474
475 Capability *
476 rts_eval (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
477 {
478 StgTSO *tso;
479
480 tso = createGenThread(cap, RtsFlags.GcFlags.initialStkSize, p);
481 return scheduleWaitThread(tso,ret,cap);
482 }
483
484 Capability *
485 rts_eval_ (Capability *cap, HaskellObj p, unsigned int stack_size,
486 /*out*/HaskellObj *ret)
487 {
488 StgTSO *tso;
489
490 tso = createGenThread(cap, stack_size, p);
491 return scheduleWaitThread(tso,ret,cap);
492 }
493
494 /*
495 * rts_evalIO() evaluates a value of the form (IO a), forcing the action's
496 * result to WHNF before returning.
497 */
498 Capability *
499 rts_evalIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
500 {
501 StgTSO* tso;
502
503 tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
504 return scheduleWaitThread(tso,ret,cap);
505 }
506
507 /*
508 * rts_evalStableIO() is suitable for calling from Haskell. It
509 * evaluates a value of the form (StablePtr (IO a)), forcing the
510 * action's result to WHNF before returning. The result is returned
511 * in a StablePtr.
512 */
/* Evaluate (StablePtr (IO a)), forcing the action's result to WHNF and
   returning it as a new StablePtr through ret.  Suitable for calling
   from Haskell itself. */
Capability *
rts_evalStableIO (Capability *cap, HsStablePtr s, /*out*/HsStablePtr *ret)
{
    StgTSO* tso;
    StgClosure *p, *r;
    SchedulerStatus stat;

    p = (StgClosure *)deRefStablePtr(s);
    tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
    // async exceptions are always blocked by default in the created
    // thread.  See #1048.
    tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
    // NB. scheduleWaitThread may hand back a different Capability than
    // the one we were called with.
    cap = scheduleWaitThread(tso,&r,cap);
    stat = rts_getSchedStatus(cap);

    // Only produce a result on success; on failure *ret is left
    // untouched and the caller must check rts_getSchedStatus().
    if (stat == Success && ret != NULL) {
	ASSERT(r != NULL);
	*ret = getStablePtr((StgPtr)r);
    }

    return cap;
}
535
536 /*
537 * Like rts_evalIO(), but doesn't force the action's result.
538 */
539 Capability *
540 rts_evalLazyIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
541 {
542 StgTSO *tso;
543
544 tso = createIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
545 return scheduleWaitThread(tso,ret,cap);
546 }
547
548 Capability *
549 rts_evalLazyIO_ (Capability *cap, HaskellObj p, unsigned int stack_size,
550 /*out*/HaskellObj *ret)
551 {
552 StgTSO *tso;
553
554 tso = createIOThread(cap, stack_size, p);
555 return scheduleWaitThread(tso,ret,cap);
556 }
557
558 /* Convenience function for decoding the returned status. */
559
/* Convenience function for decoding the returned status: report at
   `site` and abort the program unless the last evaluation succeeded.
   NB. the missing `break`s are deliberate -- the code relies on
   stg_exit() not returning. */
void
rts_checkSchedStatus (char* site, Capability *cap)
{
    SchedulerStatus rc = cap->running_task->stat;
    switch (rc) {
    case Success:
	return;
    case Killed:      /* thread died with an uncaught exception */
	errorBelch("%s: uncaught exception",site);
	stg_exit(EXIT_FAILURE);
    case Interrupted:
	errorBelch("%s: interrupted", site);
	stg_exit(EXIT_FAILURE);
    default:
	errorBelch("%s: Return code (%d) not ok",(site),(rc));
	stg_exit(EXIT_FAILURE);
    }
}
578
579 SchedulerStatus
580 rts_getSchedStatus (Capability *cap)
581 {
582 return cap->running_task->stat;
583 }
584
/* Enter the RTS from C: bind a Task to the calling OS thread and wait
   until a Capability is available.  Must be paired with rts_unlock()
   (though not necessarily on the same Capability -- rts_evalIO() etc.
   may hand back a different one). */
Capability *
rts_lock (void)
{
    Capability *cap;
    Task *task;

    // ToDo: get rid of this lock in the common case.  We could store
    // a free Task in thread-local storage, for example.  That would
    // leave just one lock on the path into the RTS: cap->lock when
    // acquiring the Capability.
    ACQUIRE_LOCK(&sched_mutex);
    task = newBoundTask();
    RELEASE_LOCK(&sched_mutex);

    cap = NULL;
    // Blocks until a Capability is free and assigns it to this task.
    waitForReturnCapability(&cap, task);
    return (Capability *)cap;
}
603
604 // Exiting the RTS: we hold a Capability that is not necessarily the
605 // same one that was originally returned by rts_lock(), because
606 // rts_evalIO() etc. may return a new one. Now that we have
607 // investigated the return value, we can release the Capability,
608 // and free the Task (in that order).
609
/* Leave the RTS: release the held Capability and retire the calling
   thread's bound Task, in that order (see the comment above for why
   the Capability we release here may differ from the one rts_lock()
   originally returned). */
void
rts_unlock (Capability *cap)
{
    Task *task;

    task = cap->running_task;
    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    // Now release the Capability.  With the capability released, GC
    // may happen.  NB. does not try to put the current Task on the
    // worker queue.
    // NB. keep cap->lock held while we call boundTaskExiting().  This
    // is necessary during shutdown, where we want the invariant that
    // after shutdownCapability(), all the Tasks associated with the
    // Capability have completed their shutdown too.  Otherwise we
    // could have boundTaskExiting()/workerTaskStop() running at some
    // random point in the future, which causes problems for
    // freeTaskManager().
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap,rtsFalse);

    // Finally, we can release the Task to the free list.
    boundTaskExiting(task);
    RELEASE_LOCK(&cap->lock);
}