Strip parentheses in expressions contexts in error messages
[ghc.git] / rts / Interpreter.c
1 /* -----------------------------------------------------------------------------
2 * Bytecode interpreter
3 *
4 * Copyright (c) The GHC Team, 1994-2002.
5 * ---------------------------------------------------------------------------*/
6
7 #include "PosixSource.h"
8 #include "Rts.h"
9 #include "RtsAPI.h"
10 #include "rts/Bytecodes.h"
11
12 // internal headers
13 #include "sm/Storage.h"
14 #include "sm/Sanity.h"
15 #include "RtsUtils.h"
16 #include "Schedule.h"
17 #include "Updates.h"
18 #include "Prelude.h"
19 #include "StablePtr.h"
20 #include "Printer.h"
21 #include "Profiling.h"
22 #include "Disassembler.h"
23 #include "Interpreter.h"
24 #include "ThreadPaused.h"
25 #include "Threads.h"
26
27 #include <string.h> /* for memcpy */
28 #if defined(HAVE_ERRNO_H)
29 #include <errno.h>
30 #endif
31
32 // When building the RTS in the non-dyn way on Windows, we don't
33 // want declspec(__dllimport__) on the front of function prototypes
34 // from libffi.
35 #if defined(mingw32_HOST_OS)
36 #if (defined(i386_HOST_ARCH) && !defined(__PIC__)) || defined(x86_64_HOST_ARCH)
37 # define LIBFFI_NOT_DLL
38 #endif
39 #endif
40
41 #include "ffi.h"
42
43 /* --------------------------------------------------------------------------
44 * The bytecode interpreter
45 * ------------------------------------------------------------------------*/
46
47 /* Gather stats about entry, opcode, opcode-pair frequencies. For
48 tuning the interpreter. */
49
50 /* #define INTERP_STATS */
51
52
53 /* Sp points to the lowest live word on the stack. */
54
55 #define BCO_NEXT instrs[bciPtr++]
56 #define BCO_NEXT_32 (bciPtr += 2)
57 #define BCO_READ_NEXT_32 (BCO_NEXT_32, (((StgWord) instrs[bciPtr-2]) << 16) \
58 + ( (StgWord) instrs[bciPtr-1]))
59 #define BCO_NEXT_64 (bciPtr += 4)
60 #define BCO_READ_NEXT_64 (BCO_NEXT_64, (((StgWord) instrs[bciPtr-4]) << 48) \
61 + (((StgWord) instrs[bciPtr-3]) << 32) \
62 + (((StgWord) instrs[bciPtr-2]) << 16) \
63 + ( (StgWord) instrs[bciPtr-1]))
64 #if WORD_SIZE_IN_BITS == 32
65 #define BCO_NEXT_WORD BCO_NEXT_32
66 #define BCO_READ_NEXT_WORD BCO_READ_NEXT_32
67 #elif WORD_SIZE_IN_BITS == 64
68 #define BCO_NEXT_WORD BCO_NEXT_64
69 #define BCO_READ_NEXT_WORD BCO_READ_NEXT_64
70 #else
71 #error Cannot cope with WORD_SIZE_IN_BITS being nether 32 nor 64
72 #endif
73 #define BCO_GET_LARGE_ARG ((bci & bci_FLAG_LARGE_ARGS) ? BCO_READ_NEXT_WORD : BCO_NEXT)
74
75 #define BCO_PTR(n) (W_)ptrs[n]
76 #define BCO_LIT(n) literals[n]
77
78 #define LOAD_STACK_POINTERS \
79 Sp = cap->r.rCurrentTSO->stackobj->sp; \
80 /* We don't change this ... */ \
81 SpLim = tso_SpLim(cap->r.rCurrentTSO);
82
83 #define SAVE_STACK_POINTERS \
84 cap->r.rCurrentTSO->stackobj->sp = Sp;
85
86 #if defined(PROFILING)
87 #define LOAD_THREAD_STATE() \
88 LOAD_STACK_POINTERS \
89 cap->r.rCCCS = cap->r.rCurrentTSO->prof.cccs;
90 #else
91 #define LOAD_THREAD_STATE() \
92 LOAD_STACK_POINTERS
93 #endif
94
95 #if defined(PROFILING)
96 #define SAVE_THREAD_STATE() \
97 SAVE_STACK_POINTERS \
98 cap->r.rCurrentTSO->prof.cccs = cap->r.rCCCS;
99 #else
100 #define SAVE_THREAD_STATE() \
101 SAVE_STACK_POINTERS
102 #endif
103
104 // Note [Not true: ASSERT(Sp > SpLim)]
105 //
106 // SpLim has some headroom (RESERVED_STACK_WORDS) to allow for saving
107 // any necessary state on the stack when returning to the scheduler
108 // when a stack check fails. The upshot of this is that Sp could be
109 // less than SpLim when leaving to return to the scheduler.
110
111 #define RETURN_TO_SCHEDULER(todo,retcode) \
112 SAVE_THREAD_STATE(); \
113 cap->r.rCurrentTSO->what_next = (todo); \
114 threadPaused(cap,cap->r.rCurrentTSO); \
115 cap->r.rRet = (retcode); \
116 return cap;
117
118 // Note [avoiding threadPaused]
119 //
120 // Switching between the interpreter to compiled code can happen very
121 // frequently, so we don't want to call threadPaused(), which is
122 // expensive. BUT we must be careful not to violate the invariant
123 // that threadPaused() has been called on all threads before we GC
124 // (see Note [upd-black-hole]). So the scheduler must ensure that when
125 // we return in this way that we definitely immediately run the thread
126 // again and don't GC or do something else.
127 //
128 #define RETURN_TO_SCHEDULER_NO_PAUSE(todo,retcode) \
129 SAVE_THREAD_STATE(); \
130 cap->r.rCurrentTSO->what_next = (todo); \
131 cap->r.rRet = (retcode); \
132 return cap;
133
134 #define Sp_plusB(n) ((void *)(((StgWord8*)Sp) + (n)))
135 #define Sp_minusB(n) ((void *)(((StgWord8*)Sp) - (n)))
136
137 #define Sp_plusW(n) (Sp_plusB((n) * sizeof(W_)))
138 #define Sp_minusW(n) (Sp_minusB((n) * sizeof(W_)))
139
140 #define Sp_addB(n) (Sp = Sp_plusB(n))
141 #define Sp_subB(n) (Sp = Sp_minusB(n))
142 #define Sp_addW(n) (Sp = Sp_plusW(n))
143 #define Sp_subW(n) (Sp = Sp_minusW(n))
144
145 #define SpW(n) (*(StgWord*)(Sp_plusW(n)))
146 #define SpB(n) (*(StgWord*)(Sp_plusB(n)))
147
148 STATIC_INLINE StgPtr
149 allocate_NONUPD (Capability *cap, int n_words)
150 {
151 return allocate(cap, stg_max(sizeofW(StgHeader)+MIN_PAYLOAD_SIZE, n_words));
152 }
153
// When non-zero, the interpreter stops at the next bci_BRK_FUN it executes
// regardless of the per-breakpoint flag array; cleared again when the stop
// is taken (see the bci_BRK_FUN handler below).
int rts_stop_next_breakpoint = 0;
// NOTE(review): presumably consulted when an exception reaches the debugger;
// not referenced in this part of the file — confirm against the exception path.
int rts_stop_on_exception = 0;
156
157 #if defined(INTERP_STATS)
158
159 /* Hacky stats, for tuning the interpreter ... */
160 int it_unknown_entries[N_CLOSURE_TYPES];
161 int it_total_unknown_entries;
162 int it_total_entries;
163
164 int it_retto_BCO;
165 int it_retto_UPDATE;
166 int it_retto_other;
167
168 int it_slides;
169 int it_insns;
170 int it_BCO_entries;
171
172 int it_ofreq[27];
173 int it_oofreq[27][27];
174 int it_lastopc;
175
176
177 #define INTERP_TICK(n) (n)++
178
179 void interp_startup ( void )
180 {
181 int i, j;
182 it_retto_BCO = it_retto_UPDATE = it_retto_other = 0;
183 it_total_entries = it_total_unknown_entries = 0;
184 for (i = 0; i < N_CLOSURE_TYPES; i++)
185 it_unknown_entries[i] = 0;
186 it_slides = it_insns = it_BCO_entries = 0;
187 for (i = 0; i < 27; i++) it_ofreq[i] = 0;
188 for (i = 0; i < 27; i++)
189 for (j = 0; j < 27; j++)
190 it_oofreq[i][j] = 0;
191 it_lastopc = 0;
192 }
193
194 void interp_shutdown ( void )
195 {
196 int i, j, k, o_max, i_max, j_max;
197 debugBelch("%d constrs entered -> (%d BCO, %d UPD, %d ??? )\n",
198 it_retto_BCO + it_retto_UPDATE + it_retto_other,
199 it_retto_BCO, it_retto_UPDATE, it_retto_other );
200 debugBelch("%d total entries, %d unknown entries \n",
201 it_total_entries, it_total_unknown_entries);
202 for (i = 0; i < N_CLOSURE_TYPES; i++) {
203 if (it_unknown_entries[i] == 0) continue;
204 debugBelch(" type %2d: unknown entries (%4.1f%%) == %d\n",
205 i, 100.0 * ((double)it_unknown_entries[i]) /
206 ((double)it_total_unknown_entries),
207 it_unknown_entries[i]);
208 }
209 debugBelch("%d insns, %d slides, %d BCO_entries\n",
210 it_insns, it_slides, it_BCO_entries);
211 for (i = 0; i < 27; i++)
212 debugBelch("opcode %2d got %d\n", i, it_ofreq[i] );
213
214 for (k = 1; k < 20; k++) {
215 o_max = 0;
216 i_max = j_max = 0;
217 for (i = 0; i < 27; i++) {
218 for (j = 0; j < 27; j++) {
219 if (it_oofreq[i][j] > o_max) {
220 o_max = it_oofreq[i][j];
221 i_max = i; j_max = j;
222 }
223 }
224 }
225
226 debugBelch("%d: count (%4.1f%%) %6d is %d then %d\n",
227 k, ((double)o_max) * 100.0 / ((double)it_insns), o_max,
228 i_max, j_max );
229 it_oofreq[i_max][j_max] = 0;
230
231 }
232 }
233
234 #else // !INTERP_STATS
235
236 #define INTERP_TICK(n) /* nothing */
237
238 #endif
239
240 #if defined(PROFILING)
241
242 //
243 // Build a zero-argument PAP with the current CCS
244 // See Note [Evaluating functions with profiling] in Apply.cmm
245 //
246 STATIC_INLINE
247 StgClosure * newEmptyPAP (Capability *cap,
248 StgClosure *tagged_obj, // a FUN or a BCO
249 uint32_t arity)
250 {
251 StgPAP *pap = (StgPAP *)allocate(cap, sizeofW(StgPAP));
252 SET_HDR(pap, &stg_PAP_info, cap->r.rCCCS);
253 pap->arity = arity;
254 pap->n_args = 0;
255 pap->fun = tagged_obj;
256 return (StgClosure *)pap;
257 }
258
259 //
260 // Make an exact copy of a PAP, except that we combine the current CCS with the
261 // CCS in the PAP. See Note [Evaluating functions with profiling] in Apply.cmm
262 //
263 STATIC_INLINE
264 StgClosure * copyPAP (Capability *cap, StgPAP *oldpap)
265 {
266 uint32_t size = PAP_sizeW(oldpap->n_args);
267 StgPAP *pap = (StgPAP *)allocate(cap, size);
268 enterFunCCS(&cap->r, oldpap->header.prof.ccs);
269 pap->arity = oldpap->arity;
270 pap->n_args = oldpap->n_args;
271 pap->fun = oldpap->fun;
272 uint32_t i;
273 for (i = 0; i < ((StgPAP *)pap)->n_args; i++) {
274 pap->payload[i] = oldpap->payload[i];
275 }
276 // No write barrier is needed here as this is a new allocation
277 SET_HDR(pap, &stg_PAP_info, cap->r.rCCCS);
278 return (StgClosure *)pap;
279 }
280
281 #endif
282
// Info tables for the all-pointer application frames stg_ap_p .. stg_ap_pppppp,
// indexed by (number of pointer arguments - 1).  Used in do_apply, as
// app_ptrs_itbl[n-arity-1], to drop a continuation frame into the stack when a
// function is applied to more arguments than its arity.
static StgWord app_ptrs_itbl[] = {
 (W_)&stg_ap_p_info,
 (W_)&stg_ap_pp_info,
 (W_)&stg_ap_ppp_info,
 (W_)&stg_ap_pppp_info,
 (W_)&stg_ap_ppppp_info,
 (W_)&stg_ap_pppppp_info,
};
291
292 HsStablePtr rts_breakpoint_io_action; // points to the IO action which is executed on a breakpoint
293 // it is set in main/GHC.hs:runStmt
294
295 Capability *
296 interpretBCO (Capability* cap)
297 {
298 // Use of register here is primarily to make it clear to compilers
299 // that these entities are non-aliasable.
300 register void *Sp; // local state -- stack pointer
301 register void *SpLim; // local state -- stack lim pointer
302 register StgClosure *tagged_obj = 0, *obj = NULL;
303 uint32_t n, m;
304
305 LOAD_THREAD_STATE();
306
307 cap->r.rHpLim = (P_)1; // HpLim is the context-switch flag; when it
308 // goes to zero we must return to the scheduler.
309
310 IF_DEBUG(interpreter,
311 debugBelch(
312 "\n---------------------------------------------------------------\n");
313 debugBelch("Entering the interpreter, Sp = %p\n", Sp);
314 #if defined(PROFILING)
315 fprintCCS(stderr, cap->r.rCCCS);
316 debugBelch("\n");
317 #endif
318 debugBelch("\n");
319 printStackChunk(Sp,cap->r.rCurrentTSO->stackobj->stack+cap->r.rCurrentTSO->stackobj->stack_size);
320 debugBelch("\n\n");
321 );
322
323 // ------------------------------------------------------------------------
324 // Case 1:
325 //
326 // We have a closure to evaluate. Stack looks like:
327 //
328 // | XXXX_info |
329 // +---------------+
330 // Sp | -------------------> closure
331 // +---------------+
332 // | stg_enter |
333 // +---------------+
334 //
335 if (SpW(0) == (W_)&stg_enter_info) {
336 Sp_addW(1);
337 goto eval;
338 }
339
340 // ------------------------------------------------------------------------
341 // Case 2:
342 //
343 // We have a BCO application to perform. Stack looks like:
344 //
345 // | .... |
346 // +---------------+
347 // | arg1 |
348 // +---------------+
349 // | BCO |
350 // +---------------+
351 // Sp | RET_BCO |
352 // +---------------+
353 //
354 else if (SpW(0) == (W_)&stg_apply_interp_info) {
355 obj = UNTAG_CLOSURE((StgClosure *)SpW(1));
356 Sp_addW(2);
357 goto run_BCO_fun;
358 }
359
360 // ------------------------------------------------------------------------
361 // Case 3:
362 //
363 // We have an unboxed value to return. See comment before
364 // do_return_unboxed, below.
365 //
366 else {
367 goto do_return_unboxed;
368 }
369
370 // Evaluate the object on top of the stack.
371 eval:
372 tagged_obj = (StgClosure*)SpW(0); Sp_addW(1);
373
374 eval_obj:
375 obj = UNTAG_CLOSURE(tagged_obj);
376 INTERP_TICK(it_total_evals);
377
378 IF_DEBUG(interpreter,
379 debugBelch(
380 "\n---------------------------------------------------------------\n");
381 debugBelch("Evaluating: "); printObj(obj);
382 debugBelch("Sp = %p\n", Sp);
383 #if defined(PROFILING)
384 fprintCCS(stderr, cap->r.rCCCS);
385 debugBelch("\n");
386 #endif
387 debugBelch("\n" );
388
389 printStackChunk(Sp,cap->r.rCurrentTSO->stackobj->stack+cap->r.rCurrentTSO->stackobj->stack_size);
390 debugBelch("\n\n");
391 );
392
393 // IF_DEBUG(sanity,checkStackChunk(Sp, cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size));
394 IF_DEBUG(sanity,checkStackFrame(Sp));
395
396 switch ( get_itbl(obj)->type ) {
397
398 case IND:
399 case IND_STATIC:
400 {
401 tagged_obj = ((StgInd*)obj)->indirectee;
402 goto eval_obj;
403 }
404
405 case CONSTR:
406 case CONSTR_1_0:
407 case CONSTR_0_1:
408 case CONSTR_2_0:
409 case CONSTR_1_1:
410 case CONSTR_0_2:
411 case CONSTR_NOCAF:
412 break;
413
414 case FUN:
415 case FUN_1_0:
416 case FUN_0_1:
417 case FUN_2_0:
418 case FUN_1_1:
419 case FUN_0_2:
420 case FUN_STATIC:
421 #if defined(PROFILING)
422 if (cap->r.rCCCS != obj->header.prof.ccs) {
423 int arity = get_fun_itbl(obj)->f.arity;
424 // Tag the function correctly. We guarantee that pap->fun
425 // is correctly tagged (this is checked by
426 // Sanity.c:checkPAP()), but we don't guarantee that every
427 // pointer to a FUN is tagged on the stack or elsewhere,
428 // so we fix the tag here. (#13767)
429 // For full details of the invariants on tagging, see
430 // https://gitlab.haskell.org/ghc/ghc/wikis/commentary/rts/haskell-execution/pointer-tagging
431 tagged_obj =
432 newEmptyPAP(cap,
433 arity <= TAG_MASK
434 ? (StgClosure *) ((intptr_t) obj + arity)
435 : obj,
436 arity);
437 }
438 #endif
439 break;
440
441 case PAP:
442 #if defined(PROFILING)
443 if (cap->r.rCCCS != obj->header.prof.ccs) {
444 tagged_obj = copyPAP(cap, (StgPAP *)obj);
445 }
446 #endif
447 break;
448
449 case BCO:
450 ASSERT(((StgBCO *)obj)->arity > 0);
451 #if defined(PROFILING)
452 if (cap->r.rCCCS != obj->header.prof.ccs) {
453 tagged_obj = newEmptyPAP(cap, obj, ((StgBCO *)obj)->arity);
454 }
455 #endif
456 break;
457
458 case AP: /* Copied from stg_AP_entry. */
459 {
460 uint32_t i, words;
461 StgAP *ap;
462
463 ap = (StgAP*)obj;
464 words = ap->n_args;
465
466 // Stack check
467 if (Sp_minusW(words+sizeofW(StgUpdateFrame)+2) < SpLim) {
468 Sp_subW(2);
469 SpW(1) = (W_)tagged_obj;
470 SpW(0) = (W_)&stg_enter_info;
471 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
472 }
473
474 #if defined(PROFILING)
475 // restore the CCCS after evaluating the AP
476 Sp_subW(2);
477 SpW(1) = (W_)cap->r.rCCCS;
478 SpW(0) = (W_)&stg_restore_cccs_eval_info;
479 #endif
480
481 Sp_subW(sizeofW(StgUpdateFrame));
482 {
483 StgUpdateFrame *__frame;
484 __frame = (StgUpdateFrame *)Sp;
485 SET_INFO((StgClosure *)__frame, (StgInfoTable *)&stg_upd_frame_info);
486 __frame->updatee = (StgClosure *)(ap);
487 }
488
489 ENTER_CCS_THUNK(cap,ap);
490
491 /* Reload the stack */
492 Sp_subW(words);
493 for (i=0; i < words; i++) {
494 SpW(i) = (W_)ap->payload[i];
495 }
496
497 obj = UNTAG_CLOSURE((StgClosure*)ap->fun);
498 ASSERT(get_itbl(obj)->type == BCO);
499 goto run_BCO_fun;
500 }
501
502 default:
503 #if defined(INTERP_STATS)
504 {
505 int j;
506
507 j = get_itbl(obj)->type;
508 ASSERT(j >= 0 && j < N_CLOSURE_TYPES);
509 it_unknown_entries[j]++;
510 it_total_unknown_entries++;
511 }
512 #endif
513 {
514 // Can't handle this object; yield to scheduler
515 IF_DEBUG(interpreter,
516 debugBelch("evaluating unknown closure -- yielding to sched\n");
517 printObj(obj);
518 );
519 #if defined(PROFILING)
520 // restore the CCCS after evaluating the closure
521 Sp_subW(2);
522 SpW(1) = (W_)cap->r.rCCCS;
523 SpW(0) = (W_)&stg_restore_cccs_eval_info;
524 #endif
525 Sp_subW(2);
526 SpW(1) = (W_)tagged_obj;
527 SpW(0) = (W_)&stg_enter_info;
528 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
529 }
530 }
531
532 // ------------------------------------------------------------------------
533 // We now have an evaluated object (tagged_obj). The next thing to
534 // do is return it to the stack frame on top of the stack.
535 do_return:
536 obj = UNTAG_CLOSURE(tagged_obj);
537 ASSERT(closure_HNF(obj));
538
539 IF_DEBUG(interpreter,
540 debugBelch(
541 "\n---------------------------------------------------------------\n");
542 debugBelch("Returning: "); printObj(obj);
543 debugBelch("Sp = %p\n", Sp);
544 #if defined(PROFILING)
545 fprintCCS(stderr, cap->r.rCCCS);
546 debugBelch("\n");
547 #endif
548 debugBelch("\n");
549 printStackChunk(Sp,cap->r.rCurrentTSO->stackobj->stack+cap->r.rCurrentTSO->stackobj->stack_size);
550 debugBelch("\n\n");
551 );
552
553 IF_DEBUG(sanity,checkStackChunk(Sp, cap->r.rCurrentTSO->stackobj->stack+cap->r.rCurrentTSO->stackobj->stack_size));
554
555 switch (get_itbl((StgClosure *)Sp)->type) {
556
557 case RET_SMALL: {
558 const StgInfoTable *info;
559
560 // NOTE: not using get_itbl().
561 info = ((StgClosure *)Sp)->header.info;
562
563 if (info == (StgInfoTable *)&stg_restore_cccs_info ||
564 info == (StgInfoTable *)&stg_restore_cccs_eval_info) {
565 cap->r.rCCCS = (CostCentreStack*)SpW(1);
566 Sp_addW(2);
567 goto do_return;
568 }
569
570 if (info == (StgInfoTable *)&stg_ap_v_info) {
571 n = 1; m = 0; goto do_apply;
572 }
573 if (info == (StgInfoTable *)&stg_ap_f_info) {
574 n = 1; m = 1; goto do_apply;
575 }
576 if (info == (StgInfoTable *)&stg_ap_d_info) {
577 n = 1; m = sizeofW(StgDouble); goto do_apply;
578 }
579 if (info == (StgInfoTable *)&stg_ap_l_info) {
580 n = 1; m = sizeofW(StgInt64); goto do_apply;
581 }
582 if (info == (StgInfoTable *)&stg_ap_n_info) {
583 n = 1; m = 1; goto do_apply;
584 }
585 if (info == (StgInfoTable *)&stg_ap_p_info) {
586 n = 1; m = 1; goto do_apply;
587 }
588 if (info == (StgInfoTable *)&stg_ap_pp_info) {
589 n = 2; m = 2; goto do_apply;
590 }
591 if (info == (StgInfoTable *)&stg_ap_ppp_info) {
592 n = 3; m = 3; goto do_apply;
593 }
594 if (info == (StgInfoTable *)&stg_ap_pppp_info) {
595 n = 4; m = 4; goto do_apply;
596 }
597 if (info == (StgInfoTable *)&stg_ap_ppppp_info) {
598 n = 5; m = 5; goto do_apply;
599 }
600 if (info == (StgInfoTable *)&stg_ap_pppppp_info) {
601 n = 6; m = 6; goto do_apply;
602 }
603 goto do_return_unrecognised;
604 }
605
606 case UPDATE_FRAME:
607 // Returning to an update frame: do the update, pop the update
608 // frame, and continue with the next stack frame.
609 //
610 // NB. we must update with the *tagged* pointer. Some tags
611 // are not optional, and if we omit the tag bits when updating
612 // then bad things can happen (albeit very rarely). See #1925.
613 // What happened was an indirection was created with an
614 // untagged pointer, and this untagged pointer was propagated
615 // to a PAP by the GC, violating the invariant that PAPs
616 // always contain a tagged pointer to the function.
617 INTERP_TICK(it_retto_UPDATE);
618 updateThunk(cap, cap->r.rCurrentTSO,
619 ((StgUpdateFrame *)Sp)->updatee, tagged_obj);
620 Sp_addW(sizeofW(StgUpdateFrame));
621 goto do_return;
622
623 case RET_BCO:
624 // Returning to an interpreted continuation: put the object on
625 // the stack, and start executing the BCO.
626 INTERP_TICK(it_retto_BCO);
627 Sp_subW(1);
628 SpW(0) = (W_)obj;
629 // NB. return the untagged object; the bytecode expects it to
630 // be untagged. XXX this doesn't seem right.
631 obj = (StgClosure*)SpW(2);
632 ASSERT(get_itbl(obj)->type == BCO);
633 goto run_BCO_return;
634
635 default:
636 do_return_unrecognised:
637 {
638 // Can't handle this return address; yield to scheduler
639 INTERP_TICK(it_retto_other);
640 IF_DEBUG(interpreter,
641 debugBelch("returning to unknown frame -- yielding to sched\n");
642 printStackChunk(Sp,cap->r.rCurrentTSO->stackobj->stack+cap->r.rCurrentTSO->stackobj->stack_size);
643 );
644 Sp_subW(2);
645 SpW(1) = (W_)tagged_obj;
646 SpW(0) = (W_)&stg_enter_info;
647 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
648 }
649 }
650
651 // -------------------------------------------------------------------------
652 // Returning an unboxed value. The stack looks like this:
653 //
654 // | .... |
655 // +---------------+
656 // | fv2 |
657 // +---------------+
658 // | fv1 |
659 // +---------------+
660 // | BCO |
661 // +---------------+
662 // | stg_ctoi_ret_ |
663 // +---------------+
664 // | retval |
665 // +---------------+
666 // | XXXX_info |
667 // +---------------+
668 //
669 // where XXXX_info is one of the stg_ret_*_info family.
670 //
671 // We're only interested in the case when the real return address
672 // is a BCO; otherwise we'll return to the scheduler.
673
674 do_return_unboxed:
675 {
676 int offset;
677
678 ASSERT( SpW(0) == (W_)&stg_ret_v_info
679 || SpW(0) == (W_)&stg_ret_p_info
680 || SpW(0) == (W_)&stg_ret_n_info
681 || SpW(0) == (W_)&stg_ret_f_info
682 || SpW(0) == (W_)&stg_ret_d_info
683 || SpW(0) == (W_)&stg_ret_l_info
684 );
685
686 IF_DEBUG(interpreter,
687 debugBelch(
688 "\n---------------------------------------------------------------\n");
689 debugBelch("Returning: "); printObj(obj);
690 debugBelch("Sp = %p\n", Sp);
691 #if defined(PROFILING)
692 fprintCCS(stderr, cap->r.rCCCS);
693 debugBelch("\n");
694 #endif
695 debugBelch("\n");
696 printStackChunk(Sp,cap->r.rCurrentTSO->stackobj->stack+cap->r.rCurrentTSO->stackobj->stack_size);
697 debugBelch("\n\n");
698 );
699
700 // get the offset of the stg_ctoi_ret_XXX itbl
701 offset = stack_frame_sizeW((StgClosure *)Sp);
702
703 switch (get_itbl((StgClosure*)(Sp_plusW(offset)))->type) {
704
705 case RET_BCO:
706 // Returning to an interpreted continuation: put the object on
707 // the stack, and start executing the BCO.
708 INTERP_TICK(it_retto_BCO);
709 obj = (StgClosure*)SpW(offset+1);
710 ASSERT(get_itbl(obj)->type == BCO);
711 goto run_BCO_return_unboxed;
712
713 default:
714 {
715 // Can't handle this return address; yield to scheduler
716 INTERP_TICK(it_retto_other);
717 IF_DEBUG(interpreter,
718 debugBelch("returning to unknown frame -- yielding to sched\n");
719 printStackChunk(Sp,cap->r.rCurrentTSO->stackobj->stack+cap->r.rCurrentTSO->stackobj->stack_size);
720 );
721 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
722 }
723 }
724 }
725 // not reached.
726
727
728 // -------------------------------------------------------------------------
729 // Application...
730
731 do_apply:
732 ASSERT(obj == UNTAG_CLOSURE(tagged_obj));
733 // we have a function to apply (obj), and n arguments taking up m
734 // words on the stack. The info table (stg_ap_pp_info or whatever)
735 // is on top of the arguments on the stack.
736 {
737 switch (get_itbl(obj)->type) {
738
739 case PAP: {
740 StgPAP *pap;
741 uint32_t i, arity;
742
743 pap = (StgPAP *)obj;
744
745 // we only cope with PAPs whose function is a BCO
746 if (get_itbl(UNTAG_CLOSURE(pap->fun))->type != BCO) {
747 goto defer_apply_to_sched;
748 }
749
750 // Stack check: we're about to unpack the PAP onto the
751 // stack. The (+1) is for the (arity < n) case, where we
752 // also need space for an extra info pointer.
753 if (Sp_minusW(pap->n_args + 1) < SpLim) {
754 Sp_subW(2);
755 SpW(1) = (W_)tagged_obj;
756 SpW(0) = (W_)&stg_enter_info;
757 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
758 }
759
760 Sp_addW(1);
761 arity = pap->arity;
762 ASSERT(arity > 0);
763 if (arity < n) {
764 // n must be greater than 1, and the only kinds of
765 // application we support with more than one argument
766 // are all pointers...
767 //
768 // Shuffle the args for this function down, and put
769 // the appropriate info table in the gap.
770 for (i = 0; i < arity; i++) {
771 SpW((int)i-1) = SpW(i);
772 // ^^^^^ careful, i-1 might be negative, but i is unsigned
773 }
774 SpW(arity-1) = app_ptrs_itbl[n-arity-1];
775 Sp_subW(1);
776 // unpack the PAP's arguments onto the stack
777 Sp_subW(pap->n_args);
778 for (i = 0; i < pap->n_args; i++) {
779 SpW(i) = (W_)pap->payload[i];
780 }
781 obj = UNTAG_CLOSURE(pap->fun);
782
783 #if defined(PROFILING)
784 enterFunCCS(&cap->r, pap->header.prof.ccs);
785 #endif
786 goto run_BCO_fun;
787 }
788 else if (arity == n) {
789 Sp_subW(pap->n_args);
790 for (i = 0; i < pap->n_args; i++) {
791 SpW(i) = (W_)pap->payload[i];
792 }
793 obj = UNTAG_CLOSURE(pap->fun);
794 #if defined(PROFILING)
795 enterFunCCS(&cap->r, pap->header.prof.ccs);
796 #endif
797 goto run_BCO_fun;
798 }
799 else /* arity > n */ {
800 // build a new PAP and return it.
801 StgPAP *new_pap;
802 new_pap = (StgPAP *)allocate(cap, PAP_sizeW(pap->n_args + m));
803 new_pap->arity = pap->arity - n;
804 new_pap->n_args = pap->n_args + m;
805 new_pap->fun = pap->fun;
806 for (i = 0; i < pap->n_args; i++) {
807 new_pap->payload[i] = pap->payload[i];
808 }
809 for (i = 0; i < m; i++) {
810 new_pap->payload[pap->n_args + i] = (StgClosure *)SpW(i);
811 }
812 // No write barrier is needed here as this is a new allocation
813 SET_HDR(new_pap,&stg_PAP_info,cap->r.rCCCS);
814 tagged_obj = (StgClosure *)new_pap;
815 Sp_addW(m);
816 goto do_return;
817 }
818 }
819
820 case BCO: {
821 uint32_t arity, i;
822
823 Sp_addW(1);
824 arity = ((StgBCO *)obj)->arity;
825 ASSERT(arity > 0);
826 if (arity < n) {
827 // n must be greater than 1, and the only kinds of
828 // application we support with more than one argument
829 // are all pointers...
830 //
831 // Shuffle the args for this function down, and put
832 // the appropriate info table in the gap.
833 for (i = 0; i < arity; i++) {
834 SpW((int)i-1) = SpW(i);
835 // ^^^^^ careful, i-1 might be negative, but i is unsigned
836 }
837 SpW(arity-1) = app_ptrs_itbl[n-arity-1];
838 Sp_subW(1);
839 goto run_BCO_fun;
840 }
841 else if (arity == n) {
842 goto run_BCO_fun;
843 }
844 else /* arity > n */ {
845 // build a PAP and return it.
846 StgPAP *pap;
847 uint32_t i;
848 pap = (StgPAP *)allocate(cap, PAP_sizeW(m));
849 pap->arity = arity - n;
850 pap->fun = obj;
851 pap->n_args = m;
852 for (i = 0; i < m; i++) {
853 pap->payload[i] = (StgClosure *)SpW(i);
854 }
855 // No write barrier is needed here as this is a new allocation
856 SET_HDR(pap, &stg_PAP_info,cap->r.rCCCS);
857 tagged_obj = (StgClosure *)pap;
858 Sp_addW(m);
859 goto do_return;
860 }
861 }
862
863 // No point in us applying machine-code functions
864 default:
865 defer_apply_to_sched:
866 IF_DEBUG(interpreter,
867 debugBelch("Cannot apply compiled function; yielding to scheduler\n"));
868 Sp_subW(2);
869 SpW(1) = (W_)tagged_obj;
870 SpW(0) = (W_)&stg_enter_info;
871 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
872 }
873
874 // ------------------------------------------------------------------------
875 // Ok, we now have a bco (obj), and its arguments are all on the
876 // stack. We can start executing the byte codes.
877 //
878 // The stack is in one of two states. First, if this BCO is a
879 // function:
880 //
881 // | .... |
882 // +---------------+
883 // | arg2 |
884 // +---------------+
885 // | arg1 |
886 // +---------------+
887 //
888 // Second, if this BCO is a continuation:
889 //
890 // | .... |
891 // +---------------+
892 // | fv2 |
893 // +---------------+
894 // | fv1 |
895 // +---------------+
896 // | BCO |
897 // +---------------+
898 // | stg_ctoi_ret_ |
899 // +---------------+
900 // | retval |
901 // +---------------+
902 //
903 // where retval is the value being returned to this continuation.
904 // In the event of a stack check, heap check, or context switch,
905 // we need to leave the stack in a sane state so the garbage
906 // collector can find all the pointers.
907 //
908 // (1) BCO is a function: the BCO's bitmap describes the
909 // pointerhood of the arguments.
910 //
911 // (2) BCO is a continuation: BCO's bitmap describes the
912 // pointerhood of the free variables.
913 //
914 // Sadly we have three different kinds of stack/heap/cswitch check
915 // to do:
916
917
918 run_BCO_return:
919 // Heap check
920 if (doYouWantToGC(cap)) {
921 Sp_subW(1); SpW(0) = (W_)&stg_enter_info;
922 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
923 }
924 // Stack checks aren't necessary at return points, the stack use
925 // is aggregated into the enclosing function entry point.
926
927 goto run_BCO;
928
929 run_BCO_return_unboxed:
930 // Heap check
931 if (doYouWantToGC(cap)) {
932 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
933 }
934 // Stack checks aren't necessary at return points, the stack use
935 // is aggregated into the enclosing function entry point.
936
937 goto run_BCO;
938
939 run_BCO_fun:
940 IF_DEBUG(sanity,
941 Sp_subW(2);
942 SpW(1) = (W_)obj;
943 SpW(0) = (W_)&stg_apply_interp_info;
944 checkStackChunk(Sp, cap->r.rCurrentTSO->stackobj->stack+cap->r.rCurrentTSO->stackobj->stack_size);
945 Sp_addW(2);
946 );
947
948 // Heap check
949 if (doYouWantToGC(cap)) {
950 Sp_subW(2);
951 SpW(1) = (W_)obj;
952 SpW(0) = (W_)&stg_apply_interp_info; // placeholder, really
953 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
954 }
955
956 // Stack check
957 if (Sp_minusW(INTERP_STACK_CHECK_THRESH) < SpLim) {
958 Sp_subW(2);
959 SpW(1) = (W_)obj;
960 SpW(0) = (W_)&stg_apply_interp_info; // placeholder, really
961 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
962 }
963
964 goto run_BCO;
965
966 // Now, actually interpret the BCO... (no returning to the
967 // scheduler again until the stack is in an orderly state).
968 run_BCO:
969 INTERP_TICK(it_BCO_entries);
970 {
971 register int bciPtr = 0; /* instruction pointer */
972 register StgWord16 bci;
973 register StgBCO* bco = (StgBCO*)obj;
974 register StgWord16* instrs = (StgWord16*)(bco->instrs->payload);
975 register StgWord* literals = (StgWord*)(&bco->literals->payload[0]);
976 register StgPtr* ptrs = (StgPtr*)(&bco->ptrs->payload[0]);
977 #if defined(DEBUG)
978 int bcoSize;
979 bcoSize = bco->instrs->bytes / sizeof(StgWord16);
980 #endif
981 IF_DEBUG(interpreter,debugBelch("bcoSize = %d\n", bcoSize));
982
983 #if defined(INTERP_STATS)
984 it_lastopc = 0; /* no opcode */
985 #endif
986
987 nextInsn:
988 ASSERT(bciPtr < bcoSize);
989 IF_DEBUG(interpreter,
990 //if (do_print_stack) {
991 //debugBelch("\n-- BEGIN stack\n");
992 //printStack(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size,iSu);
993 //debugBelch("-- END stack\n\n");
994 //}
995 debugBelch("Sp = %p pc = %-4d ", Sp, bciPtr);
996 disInstr(bco,bciPtr);
997 if (0) { int i;
998 debugBelch("\n");
999 for (i = 8; i >= 0; i--) {
1000 debugBelch("%d %p\n", i, (void *) SpW(i));
1001 }
1002 debugBelch("\n");
1003 }
1004 //if (do_print_stack) checkStack(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size,iSu);
1005 );
1006
1007
1008 INTERP_TICK(it_insns);
1009
1010 #if defined(INTERP_STATS)
1011 ASSERT( (int)instrs[bciPtr] >= 0 && (int)instrs[bciPtr] < 27 );
1012 it_ofreq[ (int)instrs[bciPtr] ] ++;
1013 it_oofreq[ it_lastopc ][ (int)instrs[bciPtr] ] ++;
1014 it_lastopc = (int)instrs[bciPtr];
1015 #endif
1016
1017 bci = BCO_NEXT;
1018 /* We use the high 8 bits for flags, only the highest of which is
1019 * currently allocated */
1020 ASSERT((bci & 0xFF00) == (bci & 0x8000));
1021
1022 switch (bci & 0xFF) {
1023
        /* check for a breakpoint on the beginning of a let binding */
        case bci_BRK_FUN:
        {
            int arg1_brk_array, arg2_array_index, arg3_module_uniq;
#if defined(PROFILING)
            int arg4_cc;
#endif
            StgArrBytes *breakPoints;
            int returning_from_break;

            // the io action to run at a breakpoint
            StgClosure *ioAction;

            // a closure to save the top stack frame on the heap
            StgAP_STACK *new_aps;

            int i;
            int size_words;

            // instruction operands: the breakpoint-flag array, this
            // breakpoint's index into it, the module unique, and
            // (profiling builds only) a cost centre
            arg1_brk_array = BCO_GET_LARGE_ARG;
            arg2_array_index = BCO_NEXT;
            arg3_module_uniq = BCO_GET_LARGE_ARG;
#if defined(PROFILING)
            arg4_cc = BCO_GET_LARGE_ARG;
#else
            // skip (discard) the cost-centre operand when not profiling
            BCO_GET_LARGE_ARG;
#endif

            // check if we are returning from a breakpoint - this info
            // is stored in the flags field of the current TSO. If true,
            // then don't break this time around.
            returning_from_break =
                cap->r.rCurrentTSO->flags & TSO_STOPPED_ON_BREAKPOINT;

#if defined(PROFILING)
            cap->r.rCCCS = pushCostCentre(cap->r.rCCCS,
                                          (CostCentre*)BCO_LIT(arg4_cc));
#endif

            // if we are returning from a break then skip this section
            // and continue executing
            if (!returning_from_break)
            {
               breakPoints = (StgArrBytes *) BCO_PTR(arg1_brk_array);

               // stop the current thread if either the
               // "rts_stop_next_breakpoint" flag is true OR if the
               // breakpoint flag for this particular expression is
               // true
               if (rts_stop_next_breakpoint == true ||
                   ((StgWord8*)breakPoints->payload)[arg2_array_index]
                     == true)
               {
                  // make sure we don't automatically stop at the
                  // next breakpoint
                  rts_stop_next_breakpoint = false;

                  // allocate memory for a new AP_STACK, enough to
                  // store the top stack frame plus a
                  // stg_apply_interp_info pointer and a pointer to
                  // the BCO
                  size_words = BCO_BITMAP_SIZE(obj) + 2;
                  new_aps = (StgAP_STACK *) allocate(cap, AP_STACK_sizeW(size_words));
                  new_aps->size = size_words;
                  new_aps->fun = &stg_dummy_ret_closure;

                  // fill in the payload of the AP_STACK
                  new_aps->payload[0] = (StgClosure *)&stg_apply_interp_info;
                  new_aps->payload[1] = (StgClosure *)obj;

                  // copy the contents of the top stack frame into the AP_STACK
                  for (i = 2; i < size_words; i++)
                  {
                      new_aps->payload[i] = (StgClosure *)SpW(i-2);
                  }

                  // No write barrier is needed here as this is a new allocation
                  SET_HDR(new_aps,&stg_AP_STACK_info,cap->r.rCCCS);

                  // Arrange the stack to call the breakpoint IO action, and
                  // continue execution of this BCO when the IO action returns.
                  //
                  // ioAction :: Int#    -- the breakpoint index
                  //          -> Int#    -- the module uniq
                  //          -> Bool    -- exception?
                  //          -> HValue  -- the AP_STACK, or exception
                  //          -> IO ()
                  //
                  ioAction = (StgClosure *) deRefStablePtr (
                      rts_breakpoint_io_action);

                  // 11-word frame: the saved BCO application (words
                  // 8-10), then the ioAction applied to its four
                  // arguments via the generic stg_ap_* frames.
                  Sp_subW(11);
                  SpW(10) = (W_)obj;
                  SpW(9) = (W_)&stg_apply_interp_info;
                  SpW(8) = (W_)new_aps;
                  SpW(7) = (W_)False_closure;         // True <=> an exception
                  SpW(6) = (W_)&stg_ap_ppv_info;
                  SpW(5) = (W_)BCO_LIT(arg3_module_uniq);
                  SpW(4) = (W_)&stg_ap_n_info;
                  SpW(3) = (W_)arg2_array_index;
                  SpW(2) = (W_)&stg_ap_n_info;
                  SpW(1) = (W_)ioAction;
                  SpW(0) = (W_)&stg_enter_info;

                  // set the flag in the TSO to say that we are now
                  // stopping at a breakpoint so that when we resume
                  // we don't stop on the same breakpoint that we
                  // already stopped at just now
                  cap->r.rCurrentTSO->flags |= TSO_STOPPED_ON_BREAKPOINT;

                  // stop this thread and return to the scheduler -
                  // eventually we will come back and the IO action on
                  // the top of the stack will be executed
                  RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
               }
            }
            // record that this thread is not stopped at a breakpoint anymore
            cap->r.rCurrentTSO->flags &= ~TSO_STOPPED_ON_BREAKPOINT;

            // continue normal execution of the byte code instructions
            goto nextInsn;
        }
1146
        case bci_STKCHECK: {
            // Explicit stack check at the beginning of a function
            // *only* (stack checks in case alternatives are
            // propagated to the enclosing function).
            StgWord stk_words_reqd = BCO_GET_LARGE_ARG + 1;
            if (Sp_minusW(stk_words_reqd) < SpLim) {
                // Not enough room: push a frame that will re-apply
                // this BCO, and let the scheduler grow the stack.
                Sp_subW(2);
                SpW(1) = (W_)obj;
                SpW(0) = (W_)&stg_apply_interp_info;
                RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
            } else {
                goto nextInsn;
            }
        }
1161
        case bci_PUSH_L: {
            // Push a copy of the word at stack offset o1.
            int o1 = BCO_NEXT;
            SpW(-1) = SpW(o1);
            Sp_subW(1);
            goto nextInsn;
        }

        case bci_PUSH_LL: {
            // Push copies of the words at offsets o1 and o2 (offsets
            // are relative to Sp before the push).
            int o1 = BCO_NEXT;
            int o2 = BCO_NEXT;
            SpW(-1) = SpW(o1);
            SpW(-2) = SpW(o2);
            Sp_subW(2);
            goto nextInsn;
        }

        case bci_PUSH_LLL: {
            // Push copies of the words at offsets o1, o2 and o3.
            int o1 = BCO_NEXT;
            int o2 = BCO_NEXT;
            int o3 = BCO_NEXT;
            SpW(-1) = SpW(o1);
            SpW(-2) = SpW(o2);
            SpW(-3) = SpW(o3);
            Sp_subW(3);
            goto nextInsn;
        }

        case bci_PUSH8: {
            // Push a copy of the byte at byte-offset 'off' (relative
            // to Sp before the push; hence off+1 after Sp_subB(1)).
            int off = BCO_NEXT;
            Sp_subB(1);
            *(StgWord8*)Sp = *(StgWord8*)(Sp_plusB(off+1));
            goto nextInsn;
        }

        case bci_PUSH16: {
            // Push a copy of the 16-bit value at byte-offset 'off'.
            int off = BCO_NEXT;
            Sp_subB(2);
            *(StgWord16*)Sp = *(StgWord16*)(Sp_plusB(off+2));
            goto nextInsn;
        }

        case bci_PUSH32: {
            // Push a copy of the 32-bit value at byte-offset 'off'.
            int off = BCO_NEXT;
            Sp_subB(4);
            *(StgWord32*)Sp = *(StgWord32*)(Sp_plusB(off+4));
            goto nextInsn;
        }

        case bci_PUSH8_W: {
            // Push the byte at byte-offset 'off', widened (zero-
            // extended) to a full stack word.
            int off = BCO_NEXT;
            *(StgWord*)(Sp_minusW(1)) = *(StgWord8*)(Sp_plusB(off));
            Sp_subW(1);
            goto nextInsn;
        }

        case bci_PUSH16_W: {
            // Push the 16-bit value at byte-offset 'off', widened to
            // a full stack word.
            int off = BCO_NEXT;
            *(StgWord*)(Sp_minusW(1)) = *(StgWord16*)(Sp_plusB(off));
            Sp_subW(1);
            goto nextInsn;
        }

        case bci_PUSH32_W: {
            // Push the 32-bit value at byte-offset 'off', widened to
            // a full stack word.
            int off = BCO_NEXT;
            *(StgWord*)(Sp_minusW(1)) = *(StgWord32*)(Sp_plusB(off));
            Sp_subW(1);
            goto nextInsn;
        }

        case bci_PUSH_G: {
            // Push a pointer from the BCO's pointer table (a "global"
            // from the bytecode's point of view).
            int o1 = BCO_GET_LARGE_ARG;
            SpW(-1) = BCO_PTR(o1);
            Sp_subW(1);
            goto nextInsn;
        }
1237
        // The PUSH_ALTS* family pushes a return-to-interpreter
        // continuation: an stg_ctoi_* info pointer plus the BCO
        // containing the case alternatives.  The stg_ctoi_* variant
        // chosen matches the representation of the value being
        // returned (pointer, int, float, double, long, void).  In
        // profiling builds an extra frame is pushed to restore the
        // current cost-centre stack on return.
        case bci_PUSH_ALTS: {
            int o_bco  = BCO_GET_LARGE_ARG;
            Sp_subW(2);
            SpW(1) = BCO_PTR(o_bco);
            SpW(0) = (W_)&stg_ctoi_R1p_info;
#if defined(PROFILING)
            Sp_subW(2);
            SpW(1) = (W_)cap->r.rCCCS;
            SpW(0) = (W_)&stg_restore_cccs_info;
#endif
            goto nextInsn;
        }

        case bci_PUSH_ALTS_P: {
            int o_bco  = BCO_GET_LARGE_ARG;
            SpW(-2) = (W_)&stg_ctoi_R1unpt_info;
            SpW(-1) = BCO_PTR(o_bco);
            Sp_subW(2);
#if defined(PROFILING)
            Sp_subW(2);
            SpW(1) = (W_)cap->r.rCCCS;
            SpW(0) = (W_)&stg_restore_cccs_info;
#endif
            goto nextInsn;
        }

        case bci_PUSH_ALTS_N: {
            int o_bco  = BCO_GET_LARGE_ARG;
            SpW(-2) = (W_)&stg_ctoi_R1n_info;
            SpW(-1) = BCO_PTR(o_bco);
            Sp_subW(2);
#if defined(PROFILING)
            Sp_subW(2);
            SpW(1) = (W_)cap->r.rCCCS;
            SpW(0) = (W_)&stg_restore_cccs_info;
#endif
            goto nextInsn;
        }

        case bci_PUSH_ALTS_F: {
            int o_bco  = BCO_GET_LARGE_ARG;
            SpW(-2) = (W_)&stg_ctoi_F1_info;
            SpW(-1) = BCO_PTR(o_bco);
            Sp_subW(2);
#if defined(PROFILING)
            Sp_subW(2);
            SpW(1) = (W_)cap->r.rCCCS;
            SpW(0) = (W_)&stg_restore_cccs_info;
#endif
            goto nextInsn;
        }

        case bci_PUSH_ALTS_D: {
            int o_bco  = BCO_GET_LARGE_ARG;
            SpW(-2) = (W_)&stg_ctoi_D1_info;
            SpW(-1) = BCO_PTR(o_bco);
            Sp_subW(2);
#if defined(PROFILING)
            Sp_subW(2);
            SpW(1) = (W_)cap->r.rCCCS;
            SpW(0) = (W_)&stg_restore_cccs_info;
#endif
            goto nextInsn;
        }

        case bci_PUSH_ALTS_L: {
            int o_bco  = BCO_GET_LARGE_ARG;
            SpW(-2) = (W_)&stg_ctoi_L1_info;
            SpW(-1) = BCO_PTR(o_bco);
            Sp_subW(2);
#if defined(PROFILING)
            Sp_subW(2);
            SpW(1) = (W_)cap->r.rCCCS;
            SpW(0) = (W_)&stg_restore_cccs_info;
#endif
            goto nextInsn;
        }

        case bci_PUSH_ALTS_V: {
            int o_bco  = BCO_GET_LARGE_ARG;
            SpW(-2) = (W_)&stg_ctoi_V_info;
            SpW(-1) = BCO_PTR(o_bco);
            Sp_subW(2);
#if defined(PROFILING)
            Sp_subW(2);
            SpW(1) = (W_)cap->r.rCCCS;
            SpW(0) = (W_)&stg_restore_cccs_info;
#endif
            goto nextInsn;
        }
1328
        // The PUSH_APPLY_* family pushes the info pointer for one of
        // the precompiled generic application frames (stg_ap_*); the
        // suffix encodes the argument pattern (n = non-ptr, v = void,
        // f/d/l = float/double/long, p... = pointers).
        case bci_PUSH_APPLY_N:
            Sp_subW(1); SpW(0) = (W_)&stg_ap_n_info;
            goto nextInsn;
        case bci_PUSH_APPLY_V:
            Sp_subW(1); SpW(0) = (W_)&stg_ap_v_info;
            goto nextInsn;
        case bci_PUSH_APPLY_F:
            Sp_subW(1); SpW(0) = (W_)&stg_ap_f_info;
            goto nextInsn;
        case bci_PUSH_APPLY_D:
            Sp_subW(1); SpW(0) = (W_)&stg_ap_d_info;
            goto nextInsn;
        case bci_PUSH_APPLY_L:
            Sp_subW(1); SpW(0) = (W_)&stg_ap_l_info;
            goto nextInsn;
        case bci_PUSH_APPLY_P:
            Sp_subW(1); SpW(0) = (W_)&stg_ap_p_info;
            goto nextInsn;
        case bci_PUSH_APPLY_PP:
            Sp_subW(1); SpW(0) = (W_)&stg_ap_pp_info;
            goto nextInsn;
        case bci_PUSH_APPLY_PPP:
            Sp_subW(1); SpW(0) = (W_)&stg_ap_ppp_info;
            goto nextInsn;
        case bci_PUSH_APPLY_PPPP:
            Sp_subW(1); SpW(0) = (W_)&stg_ap_pppp_info;
            goto nextInsn;
        case bci_PUSH_APPLY_PPPPP:
            Sp_subW(1); SpW(0) = (W_)&stg_ap_ppppp_info;
            goto nextInsn;
        case bci_PUSH_APPLY_PPPPPP:
            Sp_subW(1); SpW(0) = (W_)&stg_ap_pppppp_info;
            goto nextInsn;
1362
        // PUSH_PAD* pushes zeroed padding bytes (used to keep
        // sub-word stack data correctly aligned).
        case bci_PUSH_PAD8: {
            Sp_subB(1);
            *(StgWord8*)Sp = 0;
            goto nextInsn;
        }

        case bci_PUSH_PAD16: {
            Sp_subB(2);
            *(StgWord16*)Sp = 0;
            goto nextInsn;
        }

        case bci_PUSH_PAD32: {
            Sp_subB(4);
            *(StgWord32*)Sp = 0;
            goto nextInsn;
        }

        // PUSH_UBX* pushes an unboxed literal from the BCO's literal
        // table; o_lit indexes the table, and the 8/16/32 variants
        // push that many bits rather than a full word.
        case bci_PUSH_UBX8: {
            int o_lit = BCO_GET_LARGE_ARG;
            Sp_subB(1);
            *(StgWord8*)Sp = *(StgWord8*)(literals+o_lit);
            goto nextInsn;
        }

        case bci_PUSH_UBX16: {
            int o_lit = BCO_GET_LARGE_ARG;
            Sp_subB(2);
            *(StgWord16*)Sp = *(StgWord16*)(literals+o_lit);
            goto nextInsn;
        }

        case bci_PUSH_UBX32: {
            int o_lit = BCO_GET_LARGE_ARG;
            Sp_subB(4);
            *(StgWord32*)Sp = *(StgWord32*)(literals+o_lit);
            goto nextInsn;
        }

        case bci_PUSH_UBX: {
            // Push n_words consecutive literal words starting at
            // literal-table index o_lits.
            int i;
            int o_lits = BCO_GET_LARGE_ARG;
            int n_words = BCO_NEXT;
            Sp_subW(n_words);
            for (i = 0; i < n_words; i++) {
                SpW(i) = (W_)BCO_LIT(o_lits+i);
            }
            goto nextInsn;
        }
1412
        case bci_SLIDE: {
            // Slide the top n words of the stack down by 'by' words,
            // squeezing out the words in between.
            int n  = BCO_NEXT;
            int by = BCO_NEXT;
            /* a_1, .. a_n, b_1, .. b_by, s => a_1, .. a_n, s */
            while(--n >= 0) {
                SpW(n+by) = SpW(n);
            }
            Sp_addW(by);
            INTERP_TICK(it_slides);
            goto nextInsn;
        }
1424
        // ALLOC_AP / ALLOC_AP_NOUPD / ALLOC_PAP allocate an empty
        // application closure and push a pointer to it; the payload
        // is filled in later by a matching MKAP / MKPAP instruction.
        case bci_ALLOC_AP: {
            int n_payload = BCO_NEXT;
            StgAP *ap = (StgAP*)allocate(cap, AP_sizeW(n_payload));
            SpW(-1) = (W_)ap;
            ap->n_args = n_payload;
            ap->arity = 0;
            // No write barrier is needed here as this is a new allocation
            // visible only from our stack
            SET_HDR(ap, &stg_AP_info, cap->r.rCCCS)
            Sp_subW(1);
            goto nextInsn;
        }

        case bci_ALLOC_AP_NOUPD: {
            // As ALLOC_AP, but the thunk is non-updatable.
            int n_payload = BCO_NEXT;
            StgAP *ap = (StgAP*)allocate(cap, AP_sizeW(n_payload));
            SpW(-1) = (W_)ap;
            ap->n_args = n_payload;
            ap->arity = 0;
            // No write barrier is needed here as this is a new allocation
            // visible only from our stack
            SET_HDR(ap, &stg_AP_NOUPD_info, cap->r.rCCCS)
            Sp_subW(1);
            goto nextInsn;
        }

        case bci_ALLOC_PAP: {
            // Allocate a partial application with the given arity and
            // payload size.
            StgPAP* pap;
            int arity = BCO_NEXT;
            int n_payload = BCO_NEXT;
            pap = (StgPAP*)allocate(cap, PAP_sizeW(n_payload));
            SpW(-1) = (W_)pap;
            pap->n_args = n_payload;
            pap->arity = arity;
            // No write barrier is needed here as this is a new allocation
            // visible only from our stack
            SET_HDR(pap, &stg_PAP_info, cap->r.rCCCS)
            Sp_subW(1);
            goto nextInsn;
        }
1465
        case bci_MKAP: {
            // Fill in an AP previously pushed by ALLOC_AP at stack
            // offset stkoff: the function is at SpW(0) and the
            // n_payload arguments follow it; all are popped.
            int i;
            int stkoff = BCO_NEXT;
            int n_payload = BCO_NEXT;
            StgAP* ap = (StgAP*)SpW(stkoff);
            ASSERT((int)ap->n_args == n_payload);
            ap->fun = (StgClosure*)SpW(0);

            // The function should be a BCO, and its bitmap should
            // cover the payload of the AP correctly.
            ASSERT(get_itbl(ap->fun)->type == BCO
                   && BCO_BITMAP_SIZE(ap->fun) == ap->n_args);

            for (i = 0; i < n_payload; i++)
                ap->payload[i] = (StgClosure*)SpW(i+1);
            Sp_addW(n_payload+1);
            IF_DEBUG(interpreter,
                     debugBelch("\tBuilt ");
                     printObj((StgClosure*)ap);
                );
            goto nextInsn;
        }

        case bci_MKPAP: {
            // As MKAP, but fills in a PAP allocated by ALLOC_PAP.
            int i;
            int stkoff = BCO_NEXT;
            int n_payload = BCO_NEXT;
            StgPAP* pap = (StgPAP*)SpW(stkoff);
            ASSERT((int)pap->n_args == n_payload);
            pap->fun = (StgClosure*)SpW(0);

            // The function should be a BCO
            if (get_itbl(pap->fun)->type != BCO) {
#if defined(DEBUG)
                printClosure(pap->fun);
#endif
                barf("bci_MKPAP");
            }

            for (i = 0; i < n_payload; i++)
                pap->payload[i] = (StgClosure*)SpW(i+1);
            Sp_addW(n_payload+1);
            IF_DEBUG(interpreter,
                     debugBelch("\tBuilt ");
                     printObj((StgClosure*)pap);
                );
            goto nextInsn;
        }
1514
        case bci_UNPACK: {
            /* Unpack N ptr words from t.o.s constructor */
            // The constructor stays on the stack; its first n_words
            // pointer fields are pushed above it.
            int i;
            int n_words = BCO_NEXT;
            StgClosure* con = (StgClosure*)SpW(0);
            Sp_subW(n_words);
            for (i = 0; i < n_words; i++) {
                SpW(i) = (W_)con->payload[i];
            }
            goto nextInsn;
        }

        case bci_PACK: {
            // Build a constructor whose info table is literal o_itbl,
            // taking n_words payload words from the stack; pop them
            // and push the new closure.
            int i;
            int o_itbl = BCO_GET_LARGE_ARG;
            int n_words = BCO_NEXT;
            StgInfoTable* itbl = INFO_PTR_TO_STRUCT((StgInfoTable *)BCO_LIT(o_itbl));
            int request = CONSTR_sizeW( itbl->layout.payload.ptrs,
                                        itbl->layout.payload.nptrs );
            StgClosure* con = (StgClosure*)allocate_NONUPD(cap,request);
            ASSERT( itbl->layout.payload.ptrs + itbl->layout.payload.nptrs > 0);
            for (i = 0; i < n_words; i++) {
                con->payload[i] = (StgClosure*)SpW(i);
            }
            Sp_addW(n_words);
            Sp_subW(1);
            // No write barrier is needed here as this is a new allocation
            // visible only from our stack
            SET_HDR(con, (StgInfoTable*)BCO_LIT(o_itbl), cap->r.rCCCS);
            SpW(0) = (W_)con;
            IF_DEBUG(interpreter,
                     debugBelch("\tBuilt ");
                     printObj((StgClosure*)con);
                );
            goto nextInsn;
        }
1551
        // The TESTLT_*/TESTEQ_* family implements case-alternative
        // dispatch: compare the scrutinee on top of the stack against
        // the discriminator and branch to 'failto' when the test
        // fails; otherwise fall through to the matching alternative.
        case bci_TESTLT_P: {
            // Branch if the constructor tag is >= discr.
            unsigned int discr  = BCO_NEXT;
            int failto = BCO_GET_LARGE_ARG;
            StgClosure* con = (StgClosure*)SpW(0);
            if (GET_TAG(con) >= discr) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        case bci_TESTEQ_P: {
            // Branch if the constructor tag is not exactly discr.
            unsigned int discr  = BCO_NEXT;
            int failto = BCO_GET_LARGE_ARG;
            StgClosure* con = (StgClosure*)SpW(0);
            if (GET_TAG(con) != discr) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        case bci_TESTLT_I: {
            // There should be an Int at SpW(1), and an info table at SpW(0).
            int discr   = BCO_GET_LARGE_ARG;
            int failto  = BCO_GET_LARGE_ARG;
            I_ stackInt = (I_)SpW(1);
            if (stackInt >= (I_)BCO_LIT(discr))
                bciPtr = failto;
            goto nextInsn;
        }

        case bci_TESTEQ_I: {
            // There should be an Int at SpW(1), and an info table at SpW(0).
            int discr   = BCO_GET_LARGE_ARG;
            int failto  = BCO_GET_LARGE_ARG;
            I_ stackInt = (I_)SpW(1);
            if (stackInt != (I_)BCO_LIT(discr)) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        case bci_TESTLT_W: {
            // There should be a Word at SpW(1), and an info table at SpW(0).
            int discr   = BCO_GET_LARGE_ARG;
            int failto  = BCO_GET_LARGE_ARG;
            W_ stackWord = (W_)SpW(1);
            if (stackWord >= (W_)BCO_LIT(discr))
                bciPtr = failto;
            goto nextInsn;
        }

        case bci_TESTEQ_W: {
            // There should be a Word at SpW(1), and an info table at SpW(0).
            int discr   = BCO_GET_LARGE_ARG;
            int failto  = BCO_GET_LARGE_ARG;
            W_ stackWord = (W_)SpW(1);
            if (stackWord != (W_)BCO_LIT(discr)) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        case bci_TESTLT_D: {
            // There should be a Double at SpW(1), and an info table at SpW(0).
            int discr   = BCO_GET_LARGE_ARG;
            int failto  = BCO_GET_LARGE_ARG;
            StgDouble stackDbl, discrDbl;
            stackDbl = PK_DBL( & SpW(1) );
            discrDbl = PK_DBL( & BCO_LIT(discr) );
            if (stackDbl >= discrDbl) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        case bci_TESTEQ_D: {
            // There should be a Double at SpW(1), and an info table at SpW(0).
            int discr   = BCO_GET_LARGE_ARG;
            int failto  = BCO_GET_LARGE_ARG;
            StgDouble stackDbl, discrDbl;
            stackDbl = PK_DBL( & SpW(1) );
            discrDbl = PK_DBL( & BCO_LIT(discr) );
            if (stackDbl != discrDbl) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        case bci_TESTLT_F: {
            // There should be a Float at SpW(1), and an info table at SpW(0).
            int discr   = BCO_GET_LARGE_ARG;
            int failto  = BCO_GET_LARGE_ARG;
            StgFloat stackFlt, discrFlt;
            stackFlt = PK_FLT( & SpW(1) );
            discrFlt = PK_FLT( & BCO_LIT(discr) );
            if (stackFlt >= discrFlt) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        case bci_TESTEQ_F: {
            // There should be a Float at SpW(1), and an info table at SpW(0).
            int discr   = BCO_GET_LARGE_ARG;
            int failto  = BCO_GET_LARGE_ARG;
            StgFloat stackFlt, discrFlt;
            stackFlt = PK_FLT( & SpW(1) );
            discrFlt = PK_FLT( & BCO_LIT(discr) );
            if (stackFlt != discrFlt) {
                bciPtr = failto;
            }
            goto nextInsn;
        }
1665
        // Control-flow ish things
        case bci_ENTER:
            // Context-switch check.  We put it here to ensure that
            // the interpreter has done at least *some* work before
            // context switching: sometimes the scheduler can invoke
            // the interpreter with context_switch == 1, particularly
            // if the -C0 flag has been given on the cmd line.
            if (cap->r.rHpLim == NULL) {
                Sp_subW(1); SpW(0) = (W_)&stg_enter_info;
                RETURN_TO_SCHEDULER(ThreadInterpret, ThreadYielding);
            }
            goto eval;

        case bci_RETURN:
            // Return the (possibly tagged) closure on top of the
            // stack to the frame below it.
            tagged_obj = (StgClosure *)SpW(0);
            Sp_addW(1);
            goto do_return;

        // RETURN_<rep> pushes the matching stg_ret_* info pointer on
        // top of the unboxed value already on the stack, then returns
        // to the frame below.
        case bci_RETURN_P:
            Sp_subW(1);
            SpW(0) = (W_)&stg_ret_p_info;
            goto do_return_unboxed;
        case bci_RETURN_N:
            Sp_subW(1);
            SpW(0) = (W_)&stg_ret_n_info;
            goto do_return_unboxed;
        case bci_RETURN_F:
            Sp_subW(1);
            SpW(0) = (W_)&stg_ret_f_info;
            goto do_return_unboxed;
        case bci_RETURN_D:
            Sp_subW(1);
            SpW(0) = (W_)&stg_ret_d_info;
            goto do_return_unboxed;
        case bci_RETURN_L:
            Sp_subW(1);
            SpW(0) = (W_)&stg_ret_l_info;
            goto do_return_unboxed;
        case bci_RETURN_V:
            Sp_subW(1);
            SpW(0) = (W_)&stg_ret_v_info;
            goto do_return_unboxed;
1708
        case bci_SWIZZLE: {
            // Adjust the word at stack offset stkoff by a signed
            // 16-bit delta n (adds n to the word in place).
            int stkoff = BCO_NEXT;
            signed short n = (signed short)(BCO_NEXT);
            SpW(stkoff) += (W_)n;
            goto nextInsn;
        }
1715
        case bci_CCALL: {
            // Perform a foreign call via libffi.  Operands: the
            // distance to the next stack frame, a literal-table index
            // holding the prepared ffi_cif, and a flags byte.
            void *tok;
            int stk_offset            = BCO_NEXT;
            int o_itbl                = BCO_GET_LARGE_ARG;
            int flags                 = BCO_NEXT;
            bool interruptible        = flags & 0x1;
            bool unsafe_call          = flags & 0x2;
            void(*marshall_fn)(void*) = (void (*)(void*))BCO_LIT(o_itbl);

            /* the stack looks like this:

               |             |  <- Sp + stk_offset
               +-------------+
               |             |
               |    args     |
               |             |  <- Sp + ret_size + 1
               +-------------+
               |    C fun    |  <- Sp + ret_size
               +-------------+
               |     ret     |  <- Sp
               +-------------+

               ret is a placeholder for the return address, and may be
               up to 2 words.

               We need to copy the args out of the TSO, because when
               we call suspendThread() we no longer own the TSO stack,
               and it may move at any time - indeed suspendThread()
               itself may do stack squeezing and move our args.
               So we make a copy of the argument block.
            */

#define ROUND_UP_WDS(p)  ((((StgWord)(p)) + sizeof(W_)-1)/sizeof(W_))

            ffi_cif *cif = (ffi_cif *)marshall_fn;
            uint32_t nargs = cif->nargs;
            uint32_t ret_size;
            uint32_t i;
            int j;
            StgPtr p;
            W_ ret[2];                  // max needed
            W_ *arguments[stk_offset];  // max needed
            void *argptrs[nargs];
            void (*fn)(void);

            if (cif->rtype->type == FFI_TYPE_VOID) {
                // necessary because cif->rtype->size == 1 for void,
                // but the bytecode generator has not pushed a
                // placeholder in this case.
                ret_size = 0;
            } else {
                ret_size = ROUND_UP_WDS(cif->rtype->size);
            }

            // copy the argument block off the TSO stack into local
            // scratch space (see the large comment above)
            memcpy(arguments, Sp_plusW(ret_size+1),
                   sizeof(W_) * (stk_offset-1-ret_size));

            // libffi expects the args as an array of pointers to
            // values, so we have to construct this array before making
            // the call.
            p = (StgPtr)arguments;
            for (i = 0; i < nargs; i++) {
                argptrs[i] = (void *)p;
                // get the size from the cif
                p += ROUND_UP_WDS(cif->arg_types[i]->size);
            }

            // this is the function we're going to call
            fn = (void(*)(void))SpW(ret_size);

            // Restore the Haskell thread's current value of errno
            errno = cap->r.rCurrentTSO->saved_errno;

            // There are a bunch of non-ptr words on the stack (the
            // ccall args, the ccall fun address and space for the
            // result), which we need to cover with an info table
            // since we might GC during this call.
            //
            // We know how many (non-ptr) words there are before the
            // next valid stack frame: it is the stk_offset arg to the
            // CCALL instruction.  So we overwrite this area of the
            // stack with empty stack frames (stg_ret_v_info);
            //
            for (j = 0; j < stk_offset; j++) {
                SpW(j) = (W_)&stg_ret_v_info; /* an empty stack frame */
            }

            // save obj (pointer to the current BCO), since this
            // might move during the call.  We push an stg_ret_p frame
            // for this.
            Sp_subW(2);
            SpW(1) = (W_)obj;
            SpW(0) = (W_)&stg_ret_p_info;

            // for a safe call, release the Capability while the
            // foreign code runs
            if (!unsafe_call) {
                SAVE_THREAD_STATE();
                tok = suspendThread(&cap->r, interruptible);
            }

            // We already made a copy of the arguments above.
            ffi_call(cif, fn, ret, argptrs);

            // And restart the thread again, popping the stg_ret_p frame.
            if (!unsafe_call) {
                cap = (Capability *)((void *)((unsigned char*)resumeThread(tok) - STG_FIELD_OFFSET(Capability,r)));
                LOAD_THREAD_STATE();
            }

            if (SpW(0) != (W_)&stg_ret_p_info) {
                // the stack is not how we left it.  This probably
                // means that an exception got raised on exit from the
                // foreign call, so we should just continue with
                // whatever is on top of the stack now.
                RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
            }

            // Re-load the pointer to the BCO from the stg_ret_p frame,
            // it might have moved during the call.  Also reload the
            // pointers to the components of the BCO.
            obj        = (StgClosure*)SpW(1);
            bco        = (StgBCO*)obj;
            instrs     = (StgWord16*)(bco->instrs->payload);
            literals   = (StgWord*)(&bco->literals->payload[0]);
            ptrs       = (StgPtr*)(&bco->ptrs->payload[0]);

            Sp_addW(2); // pop the stg_ret_p frame

            // Save the Haskell thread's current value of errno
            cap->r.rCurrentTSO->saved_errno = errno;

            // Copy the return value back to the TSO stack.  It is at
            // most 2 words large, and resides in ret[].
            memcpy(Sp, ret, sizeof(W_) * stg_min(stk_offset,ret_size));

            goto nextInsn;
        }
1852
        case bci_JMP: {
            /* BCO_NEXT modifies bciPtr, so be conservative. */
            // Unconditional jump to an absolute instruction index.
            int nextpc = BCO_GET_LARGE_ARG;
            bciPtr = nextpc;
            goto nextInsn;
        }

        case bci_CASEFAIL:
            // A case expression fell through every alternative; the
            // bytecode generator should never let this be reachable.
            barf("interpretBCO: hit a CASEFAIL");

        // Errors
        default:
            barf("interpretBCO: unknown or unimplemented opcode %d",
                 (int)(bci & 0xFF));

        } /* switch on opcode */
    }
    }

    barf("interpretBCO: fell off end of the interpreter");
}