/* -----------------------------------------------------------------------------
 * Bytecode interpreter
 *
 * Copyright (c) The GHC Team, 1994-2002.
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "RtsAPI.h"
#include "RtsUtils.h"
#include "Closures.h"
#include "TSO.h"
#include "Schedule.h"
#include "RtsFlags.h"
#include "LdvProfile.h"
#include "Updates.h"
#include "Sanity.h"
#include "Liveness.h"
#include "Prelude.h"

#include "Bytecodes.h"
#include "Printer.h"
#include "Disassembler.h"
#include "Interpreter.h"

#include <string.h>     /* for memcpy */
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif


/* --------------------------------------------------------------------------
 * The bytecode interpreter
 * ------------------------------------------------------------------------*/

/* Gather stats about entry, opcode, opcode-pair frequencies.  For
   tuning the interpreter. */

/* #define INTERP_STATS */


/* Sp points to the lowest live word on the stack. */

#define BCO_NEXT      instrs[bciPtr++]
#define BCO_NEXT_32   (bciPtr += 2, (((StgWord) instrs[bciPtr-2]) << 16) + ((StgWord) instrs[bciPtr-1]))
#define BCO_NEXT_64   (bciPtr += 4, (((StgWord) instrs[bciPtr-4]) << 48) + (((StgWord) instrs[bciPtr-3]) << 32) + (((StgWord) instrs[bciPtr-2]) << 16) + ((StgWord) instrs[bciPtr-1]))
#if WORD_SIZE_IN_BITS == 32
#define BCO_NEXT_WORD BCO_NEXT_32
#elif WORD_SIZE_IN_BITS == 64
#define BCO_NEXT_WORD BCO_NEXT_64
#else
#error Cannot cope with WORD_SIZE_IN_BITS being neither 32 nor 64
#endif
#define BCO_GET_LARGE_ARG ((bci & bci_FLAG_LARGE_ARGS) ? BCO_NEXT_WORD : BCO_NEXT)
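
/* A sketch of how these macros combine (illustration only; the real
 * uses are in run_BCO below, which supplies 'instrs' and 'bciPtr'):
 *
 *     StgWord16 bci     = BCO_NEXT;           // opcode + flag bits
 *     StgWord   operand = BCO_GET_LARGE_ARG;  // one 16-bit slot, or a
 *                                             // full word if the
 *                                             // bci_FLAG_LARGE_ARGS
 *                                             // flag is set in bci
 */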

#define BCO_PTR(n)    (W_)ptrs[n]
#define BCO_LIT(n)    literals[n]

#define LOAD_STACK_POINTERS                                     \
    Sp = cap->r.rCurrentTSO->sp;                                \
    /* We don't change this ... */                              \
    SpLim = cap->r.rCurrentTSO->stack + RESERVED_STACK_WORDS;

#define SAVE_STACK_POINTERS                     \
    cap->r.rCurrentTSO->sp = Sp

#define RETURN_TO_SCHEDULER(todo,retcode)       \
    SAVE_STACK_POINTERS;                        \
    cap->r.rCurrentTSO->what_next = (todo);     \
    threadPaused(cap,cap->r.rCurrentTSO);       \
    cap->r.rRet = (retcode);                    \
    return cap;

#define RETURN_TO_SCHEDULER_NO_PAUSE(todo,retcode)      \
    SAVE_STACK_POINTERS;                                \
    cap->r.rCurrentTSO->what_next = (todo);             \
    cap->r.rRet = (retcode);                            \
    return cap;
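
/* Both RETURN_TO_SCHEDULER variants hand the capability back to the
 * scheduler: 'todo' is stored in the TSO's what_next field so the
 * scheduler knows how to resume the thread (ThreadInterpret to re-enter
 * interpretBCO, ThreadRunGHC to resume in compiled code), and 'retcode'
 * goes into cap->r.rRet to say why we stopped (HeapOverflow,
 * StackOverflow, ThreadYielding, ...).  The _NO_PAUSE variant does the
 * same but without calling threadPaused().
 */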


/* All heap objects must carry at least MIN_PAYLOAD_SIZE words of
 * payload (so that, for example, the storage manager can overwrite
 * them later); allocate_NONUPD enforces that lower bound for the
 * non-updatable constructors built by bci_PACK below. */
STATIC_INLINE StgPtr
allocate_NONUPD (int n_words)
{
    return allocate(stg_max(sizeofW(StgHeader)+MIN_PAYLOAD_SIZE, n_words));
}

int rts_stop_next_breakpoint = 0;
int rts_stop_on_exception = 0;

#ifdef INTERP_STATS

/* Hacky stats, for tuning the interpreter ... */
int it_unknown_entries[N_CLOSURE_TYPES];
int it_total_unknown_entries;
int it_total_entries;

int it_retto_BCO;
int it_retto_UPDATE;
int it_retto_other;

int it_slides;
int it_insns;
int it_BCO_entries;

int it_ofreq[27];
int it_oofreq[27][27];
int it_lastopc;


#define INTERP_TICK(n) (n)++

void interp_startup ( void )
{
    int i, j;
    it_retto_BCO = it_retto_UPDATE = it_retto_other = 0;
    it_total_entries = it_total_unknown_entries = 0;
    for (i = 0; i < N_CLOSURE_TYPES; i++)
        it_unknown_entries[i] = 0;
    it_slides = it_insns = it_BCO_entries = 0;
    for (i = 0; i < 27; i++) it_ofreq[i] = 0;
    for (i = 0; i < 27; i++)
        for (j = 0; j < 27; j++)
            it_oofreq[i][j] = 0;
    it_lastopc = 0;
}

void interp_shutdown ( void )
{
    int i, j, k, o_max, i_max, j_max;
    debugBelch("%d constrs entered -> (%d BCO, %d UPD, %d ??? )\n",
               it_retto_BCO + it_retto_UPDATE + it_retto_other,
               it_retto_BCO, it_retto_UPDATE, it_retto_other );
    debugBelch("%d total entries, %d unknown entries \n",
               it_total_entries, it_total_unknown_entries);
    for (i = 0; i < N_CLOSURE_TYPES; i++) {
        if (it_unknown_entries[i] == 0) continue;
        debugBelch(" type %2d: unknown entries (%4.1f%%) == %d\n",
                   i, 100.0 * ((double)it_unknown_entries[i]) /
                      ((double)it_total_unknown_entries),
                   it_unknown_entries[i]);
    }
    debugBelch("%d insns, %d slides, %d BCO_entries\n",
               it_insns, it_slides, it_BCO_entries);
    for (i = 0; i < 27; i++)
        debugBelch("opcode %2d got %d\n", i, it_ofreq[i] );

    for (k = 1; k < 20; k++) {
        o_max = 0;
        i_max = j_max = 0;
        for (i = 0; i < 27; i++) {
            for (j = 0; j < 27; j++) {
                if (it_oofreq[i][j] > o_max) {
                    o_max = it_oofreq[i][j];
                    i_max = i; j_max = j;
                }
            }
        }

        debugBelch("%d: count (%4.1f%%) %6d is %d then %d\n",
                   k, ((double)o_max) * 100.0 / ((double)it_insns), o_max,
                   i_max, j_max );
        it_oofreq[i_max][j_max] = 0;

    }
}

#else // !INTERP_STATS

#define INTERP_TICK(n) /* nothing */

#endif

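/* Info tables for the stg_ap_p ... stg_ap_pppppp application frames,
 * indexed by number of (pointer) arguments minus one.  Used in do_apply
 * below when a function is applied to more arguments than its arity:
 * the excess (all-pointer) arguments are covered with the appropriate
 * stg_ap_* frame taken from this table.
 */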
static StgWord app_ptrs_itbl[] = {
    (W_)&stg_ap_p_info,
    (W_)&stg_ap_pp_info,
    (W_)&stg_ap_ppp_info,
    (W_)&stg_ap_pppp_info,
    (W_)&stg_ap_ppppp_info,
    (W_)&stg_ap_pppppp_info,
};

HsStablePtr rts_breakpoint_io_action; // points to the IO action which is executed on a breakpoint
                                      // it is set in main/GHC.hs:runStmt

Capability *
interpretBCO (Capability* cap)
{
    // Use of register here is primarily to make it clear to compilers
    // that these entities are non-aliasable.
    register StgPtr       Sp;    // local state -- stack pointer
    register StgPtr       SpLim; // local state -- stack lim pointer
    register StgClosure   *tagged_obj = 0, *obj;
    nat n, m;

    LOAD_STACK_POINTERS;

    // ------------------------------------------------------------------------
    // Case 1:
    //
    //       We have a closure to evaluate.  Stack looks like:
    //
    //          |     XXXX_info |
    //          +---------------+
    //       Sp |  -------------------> closure
    //          +---------------+
    //
    if (Sp[0] == (W_)&stg_enter_info) {
        Sp++;
        goto eval;
    }

    // ------------------------------------------------------------------------
    // Case 2:
    //
    //       We have a BCO application to perform.  Stack looks like:
    //
    //          |     ....      |
    //          +---------------+
    //          |     arg1      |
    //          +---------------+
    //          |     BCO       |
    //          +---------------+
    //       Sp |   RET_BCO     |
    //          +---------------+
    //
    else if (Sp[0] == (W_)&stg_apply_interp_info) {
        obj = UNTAG_CLOSURE((StgClosure *)Sp[1]);
        Sp += 2;
        goto run_BCO_fun;
    }

    // ------------------------------------------------------------------------
    // Case 3:
    //
    //       We have an unboxed value to return.  See comment before
    //       do_return_unboxed, below.
    //
    else {
        goto do_return_unboxed;
    }

    // Evaluate the object on top of the stack.
eval:
    tagged_obj = (StgClosure*)Sp[0]; Sp++;

eval_obj:
    obj = UNTAG_CLOSURE(tagged_obj);
    INTERP_TICK(it_total_entries);

    IF_DEBUG(interpreter,
             debugBelch(
             "\n---------------------------------------------------------------\n");
             debugBelch("Evaluating: "); printObj(obj);
             debugBelch("Sp = %p\n", Sp);
             debugBelch("\n" );

             printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
             debugBelch("\n\n");
            );

    IF_DEBUG(sanity,checkStackChunk(Sp, cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size));

    switch ( get_itbl(obj)->type ) {

    case IND:
    case IND_OLDGEN:
    case IND_PERM:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
    {
        tagged_obj = ((StgInd*)obj)->indirectee;
        goto eval_obj;
    }

    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_2_0:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_2_0:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_STATIC:
    case PAP:
        // already in WHNF
        break;

    case BCO:
    {
        ASSERT(((StgBCO *)obj)->arity > 0);
        break;
    }

    case AP:    /* Copied from stg_AP_entry. */
    {
        nat i, words;
        StgAP *ap;

        ap = (StgAP*)obj;
        words = ap->n_args;

        // Stack check
        if (Sp - (words+sizeofW(StgUpdateFrame)) < SpLim) {
            Sp -= 2;
            Sp[1] = (W_)tagged_obj;
            Sp[0] = (W_)&stg_enter_info;
            RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
        }

        /* Ok; we're safe.  Party on.  Push an update frame. */
        Sp -= sizeofW(StgUpdateFrame);
        {
            StgUpdateFrame *__frame;
            __frame = (StgUpdateFrame *)Sp;
            SET_INFO(__frame, (StgInfoTable *)&stg_upd_frame_info);
            __frame->updatee = (StgClosure *)(ap);
        }

        /* Reload the stack */
        Sp -= words;
        for (i=0; i < words; i++) {
            Sp[i] = (W_)ap->payload[i];
        }

        obj = UNTAG_CLOSURE((StgClosure*)ap->fun);
        ASSERT(get_itbl(obj)->type == BCO);
        goto run_BCO_fun;
    }

    default:
#ifdef INTERP_STATS
    {
        int j;

        j = get_itbl(obj)->type;
        ASSERT(j >= 0 && j < N_CLOSURE_TYPES);
        it_unknown_entries[j]++;
        it_total_unknown_entries++;
    }
#endif
    {
        // Can't handle this object; yield to scheduler
        IF_DEBUG(interpreter,
                 debugBelch("evaluating unknown closure -- yielding to sched\n");
                 printObj(obj);
                );
        Sp -= 2;
        Sp[1] = (W_)tagged_obj;
        Sp[0] = (W_)&stg_enter_info;
        RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
    }
    }

    // ------------------------------------------------------------------------
    // We now have an evaluated object (tagged_obj).  The next thing to
    // do is return it to the stack frame on top of the stack.
do_return:
    obj = UNTAG_CLOSURE(tagged_obj);
    ASSERT(closure_HNF(obj));

    IF_DEBUG(interpreter,
             debugBelch(
             "\n---------------------------------------------------------------\n");
             debugBelch("Returning: "); printObj(obj);
             debugBelch("Sp = %p\n", Sp);
             debugBelch("\n" );
             printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
             debugBelch("\n\n");
            );

    IF_DEBUG(sanity,checkStackChunk(Sp, cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size));

    switch (get_itbl((StgClosure *)Sp)->type) {

    case RET_SMALL: {
        const StgInfoTable *info;

        // NOTE: not using get_itbl().
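        // If the frame on top of the stack is one of the stg_ap_*
        // application frames, the value we are returning is a function
        // that should be applied immediately to the arguments held in
        // the frame: n is the number of arguments and m the number of
        // words they occupy on the stack (see do_apply below).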
        info = ((StgClosure *)Sp)->header.info;
        if (info == (StgInfoTable *)&stg_ap_v_info) {
            n = 1; m = 0; goto do_apply;
        }
        if (info == (StgInfoTable *)&stg_ap_f_info) {
            n = 1; m = 1; goto do_apply;
        }
        if (info == (StgInfoTable *)&stg_ap_d_info) {
            n = 1; m = sizeofW(StgDouble); goto do_apply;
        }
        if (info == (StgInfoTable *)&stg_ap_l_info) {
            n = 1; m = sizeofW(StgInt64); goto do_apply;
        }
        if (info == (StgInfoTable *)&stg_ap_n_info) {
            n = 1; m = 1; goto do_apply;
        }
        if (info == (StgInfoTable *)&stg_ap_p_info) {
            n = 1; m = 1; goto do_apply;
        }
        if (info == (StgInfoTable *)&stg_ap_pp_info) {
            n = 2; m = 2; goto do_apply;
        }
        if (info == (StgInfoTable *)&stg_ap_ppp_info) {
            n = 3; m = 3; goto do_apply;
        }
        if (info == (StgInfoTable *)&stg_ap_pppp_info) {
            n = 4; m = 4; goto do_apply;
        }
        if (info == (StgInfoTable *)&stg_ap_ppppp_info) {
            n = 5; m = 5; goto do_apply;
        }
        if (info == (StgInfoTable *)&stg_ap_pppppp_info) {
            n = 6; m = 6; goto do_apply;
        }
        goto do_return_unrecognised;
    }

    case UPDATE_FRAME:
        // Returning to an update frame: do the update, pop the update
        // frame, and continue with the next stack frame.
        //
        // NB. we must update with the *tagged* pointer.  Some tags
        // are not optional, and if we omit the tag bits when updating
        // then bad things can happen (albeit very rarely).  See #1925.
        // What happened was an indirection was created with an
        // untagged pointer, and this untagged pointer was propagated
        // to a PAP by the GC, violating the invariant that PAPs
        // always contain a tagged pointer to the function.
        INTERP_TICK(it_retto_UPDATE);
        UPD_IND(((StgUpdateFrame *)Sp)->updatee, tagged_obj);
        Sp += sizeofW(StgUpdateFrame);
        goto do_return;

    case RET_BCO:
        // Returning to an interpreted continuation: put the object on
        // the stack, and start executing the BCO.
        INTERP_TICK(it_retto_BCO);
        Sp--;
        Sp[0] = (W_)obj;
        // NB. return the untagged object; the bytecode expects it to
        // be untagged.  XXX this doesn't seem right.
        obj = (StgClosure*)Sp[2];
        ASSERT(get_itbl(obj)->type == BCO);
        goto run_BCO_return;

    default:
    do_return_unrecognised:
    {
        // Can't handle this return address; yield to scheduler
        INTERP_TICK(it_retto_other);
        IF_DEBUG(interpreter,
                 debugBelch("returning to unknown frame -- yielding to sched\n");
                 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
                );
        Sp -= 2;
        Sp[1] = (W_)tagged_obj;
        Sp[0] = (W_)&stg_enter_info;
        RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
    }
    }

    // -------------------------------------------------------------------------
    // Returning an unboxed value.  The stack looks like this:
    //
    //    |     ....      |
    //    +---------------+
    //    |     fv2       |
    //    +---------------+
    //    |     fv1       |
    //    +---------------+
    //    |     BCO       |
    //    +---------------+
    //    | stg_ctoi_ret_ |
    //    +---------------+
    //    |    retval     |
    //    +---------------+
    //    |   XXXX_info   |
    //    +---------------+
    //
    // where XXXX_info is one of the stg_gc_unbx_r1_info family.
    //
    // We're only interested in the case when the real return address
    // is a BCO; otherwise we'll return to the scheduler.
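    //
    // For example, when bci_RETURN_N (below) returns an Int# to a
    // continuation pushed by bci_PUSH_ALTS_N, the top of the stack is
    // roughly:
    //
    //    Sp[0] = &stg_gc_unbx_r1_info   (a frame of size 2)
    //    Sp[1] = the Int# being returned
    //    Sp[2] = &stg_ctoi_R1n_info
    //    Sp[3] = the case-continuation BCO
    //
    // so 'offset' below is 2, and Sp[offset+1] is the BCO to run.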

do_return_unboxed:
    {
        int offset;

        ASSERT(    Sp[0] == (W_)&stg_gc_unbx_r1_info
                || Sp[0] == (W_)&stg_gc_unpt_r1_info
                || Sp[0] == (W_)&stg_gc_f1_info
                || Sp[0] == (W_)&stg_gc_d1_info
                || Sp[0] == (W_)&stg_gc_l1_info
                || Sp[0] == (W_)&stg_gc_void_info // VoidRep
            );

        // get the offset of the stg_ctoi_ret_XXX itbl
        offset = stack_frame_sizeW((StgClosure *)Sp);

        switch (get_itbl((StgClosure *)Sp+offset)->type) {

        case RET_BCO:
            // Returning to an interpreted continuation: put the object on
            // the stack, and start executing the BCO.
            INTERP_TICK(it_retto_BCO);
            obj = (StgClosure*)Sp[offset+1];
            ASSERT(get_itbl(obj)->type == BCO);
            goto run_BCO_return_unboxed;

        default:
        {
            // Can't handle this return address; yield to scheduler
            INTERP_TICK(it_retto_other);
            IF_DEBUG(interpreter,
                     debugBelch("returning to unknown frame -- yielding to sched\n");
                     printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
                    );
            RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
        }
        }
    }
    // not reached.


    // -------------------------------------------------------------------------
    // Application...

do_apply:
    ASSERT(obj == UNTAG_CLOSURE(tagged_obj));
    // we have a function to apply (obj), and n arguments taking up m
    // words on the stack.  The info table (stg_ap_pp_info or whatever)
    // is on top of the arguments on the stack.
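    //
    // For example, if obj is a BCO of arity 2 and we arrived here from
    // a stg_ap_ppp frame (n = 3, m = 3), then arity < n below: the
    // first two arguments are shuffled down over the application
    // frame's info pointer, a stg_ap_p frame (app_ptrs_itbl[n-arity-1])
    // is dropped into the gap to account for the remaining argument,
    // and the BCO is run on the first two arguments.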
    {
        switch (get_itbl(obj)->type) {

        case PAP: {
            StgPAP *pap;
            nat i, arity;

            pap = (StgPAP *)obj;

            // we only cope with PAPs whose function is a BCO
            if (get_itbl(UNTAG_CLOSURE(pap->fun))->type != BCO) {
                goto defer_apply_to_sched;
            }

            Sp++;
            arity = pap->arity;
            ASSERT(arity > 0);
            if (arity < n) {
                // n must be greater than 1, and the only kinds of
                // application we support with more than one argument
                // are all pointers...
                //
                // Shuffle the args for this function down, and put
                // the appropriate info table in the gap.
                for (i = 0; i < arity; i++) {
                    Sp[(int)i-1] = Sp[i];
                    // ^^^^^ careful, i-1 might be negative, but i is unsigned
                }
                Sp[arity-1] = app_ptrs_itbl[n-arity-1];
                Sp--;
                // unpack the PAP's arguments onto the stack
                Sp -= pap->n_args;
                for (i = 0; i < pap->n_args; i++) {
                    Sp[i] = (W_)pap->payload[i];
                }
                obj = UNTAG_CLOSURE(pap->fun);
                goto run_BCO_fun;
            }
            else if (arity == n) {
                Sp -= pap->n_args;
                for (i = 0; i < pap->n_args; i++) {
                    Sp[i] = (W_)pap->payload[i];
                }
                obj = UNTAG_CLOSURE(pap->fun);
                goto run_BCO_fun;
            }
            else /* arity > n */ {
                // build a new PAP and return it.
                StgPAP *new_pap;
                new_pap = (StgPAP *)allocate(PAP_sizeW(pap->n_args + m));
                SET_HDR(new_pap,&stg_PAP_info,CCCS);
                new_pap->arity = pap->arity - n;
                new_pap->n_args = pap->n_args + m;
                new_pap->fun = pap->fun;
                for (i = 0; i < pap->n_args; i++) {
                    new_pap->payload[i] = pap->payload[i];
                }
                for (i = 0; i < m; i++) {
                    new_pap->payload[pap->n_args + i] = (StgClosure *)Sp[i];
                }
                tagged_obj = (StgClosure *)new_pap;
                Sp += m;
                goto do_return;
            }
        }

        case BCO: {
            nat arity, i;

            Sp++;
            arity = ((StgBCO *)obj)->arity;
            ASSERT(arity > 0);
            if (arity < n) {
                // n must be greater than 1, and the only kinds of
                // application we support with more than one argument
                // are all pointers...
                //
                // Shuffle the args for this function down, and put
                // the appropriate info table in the gap.
                for (i = 0; i < arity; i++) {
                    Sp[(int)i-1] = Sp[i];
                    // ^^^^^ careful, i-1 might be negative, but i is unsigned
                }
                Sp[arity-1] = app_ptrs_itbl[n-arity-1];
                Sp--;
                goto run_BCO_fun;
            }
            else if (arity == n) {
                goto run_BCO_fun;
            }
            else /* arity > n */ {
                // build a PAP and return it.
                StgPAP *pap;
                nat i;
                pap = (StgPAP *)allocate(PAP_sizeW(m));
                SET_HDR(pap, &stg_PAP_info,CCCS);
                pap->arity = arity - n;
                pap->fun = obj;
                pap->n_args = m;
                for (i = 0; i < m; i++) {
                    pap->payload[i] = (StgClosure *)Sp[i];
                }
                tagged_obj = (StgClosure *)pap;
                Sp += m;
                goto do_return;
            }
        }

        // No point in us applying machine-code functions
        default:
        defer_apply_to_sched:
            Sp -= 2;
            Sp[1] = (W_)tagged_obj;
            Sp[0] = (W_)&stg_enter_info;
            RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
        }

    // ------------------------------------------------------------------------
    // Ok, we now have a bco (obj), and its arguments are all on the
    // stack.  We can start executing the byte codes.
    //
    // The stack is in one of two states.  First, if this BCO is a
    // function:
    //
    //    |     ....      |
    //    +---------------+
    //    |     arg2      |
    //    +---------------+
    //    |     arg1      |
    //    +---------------+
    //
    // Second, if this BCO is a continuation:
    //
    //    |     ....      |
    //    +---------------+
    //    |     fv2       |
    //    +---------------+
    //    |     fv1       |
    //    +---------------+
    //    |     BCO       |
    //    +---------------+
    //    | stg_ctoi_ret_ |
    //    +---------------+
    //    |    retval     |
    //    +---------------+
    //
    // where retval is the value being returned to this continuation.
    // In the event of a stack check, heap check, or context switch,
    // we need to leave the stack in a sane state so the garbage
    // collector can find all the pointers.
    //
    //  (1) BCO is a function:  the BCO's bitmap describes the
    //      pointerhood of the arguments.
    //
    //  (2) BCO is a continuation: BCO's bitmap describes the
    //      pointerhood of the free variables.
    //
    // Sadly we have three different kinds of stack/heap/cswitch check
    // to do:


run_BCO_return:
    // Heap check
    if (doYouWantToGC()) {
        Sp--; Sp[0] = (W_)&stg_enter_info;
        RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
    }
    // Stack checks aren't necessary at return points, the stack use
    // is aggregated into the enclosing function entry point.

    goto run_BCO;

run_BCO_return_unboxed:
    // Heap check
    if (doYouWantToGC()) {
        RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
    }
    // Stack checks aren't necessary at return points, the stack use
    // is aggregated into the enclosing function entry point.

    goto run_BCO;

run_BCO_fun:
    IF_DEBUG(sanity,
             Sp -= 2;
             Sp[1] = (W_)obj;
             Sp[0] = (W_)&stg_apply_interp_info;
             checkStackChunk(Sp,SpLim);
             Sp += 2;
            );

    // Heap check
    if (doYouWantToGC()) {
        Sp -= 2;
        Sp[1] = (W_)obj;
        Sp[0] = (W_)&stg_apply_interp_info; // placeholder, really
        RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
    }

    // Stack check
    if (Sp - INTERP_STACK_CHECK_THRESH < SpLim) {
        Sp -= 2;
        Sp[1] = (W_)obj;
        Sp[0] = (W_)&stg_apply_interp_info; // placeholder, really
        RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
    }

    goto run_BCO;

    // Now, actually interpret the BCO... (no returning to the
    // scheduler again until the stack is in an orderly state).
run_BCO:
    INTERP_TICK(it_BCO_entries);
    {
        register int        bciPtr = 1; /* instruction pointer */
        register StgWord16  bci;
        register StgBCO*    bco      = (StgBCO*)obj;
        register StgWord16* instrs   = (StgWord16*)(bco->instrs->payload);
        register StgWord*   literals = (StgWord*)(&bco->literals->payload[0]);
        register StgPtr*    ptrs     = (StgPtr*)(&bco->ptrs->payload[0]);

#ifdef INTERP_STATS
        it_lastopc = 0; /* no opcode */
#endif

    nextInsn:
        ASSERT(bciPtr <= instrs[0]);
        IF_DEBUG(interpreter,
                 //if (do_print_stack) {
                 //debugBelch("\n-- BEGIN stack\n");
                 //printStack(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size,iSu);
                 //debugBelch("-- END stack\n\n");
                 //}
                 debugBelch("Sp = %p pc = %d ", Sp, bciPtr);
                 disInstr(bco,bciPtr);
                 if (0) { int i;
                     debugBelch("\n");
                     for (i = 8; i >= 0; i--) {
                         debugBelch("%d %p\n", i, (StgPtr)(*(Sp+i)));
                     }
                     debugBelch("\n");
                 }
                 //if (do_print_stack) checkStack(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size,iSu);
                );


        INTERP_TICK(it_insns);

#ifdef INTERP_STATS
        ASSERT( (int)instrs[bciPtr] >= 0 && (int)instrs[bciPtr] < 27 );
        it_ofreq[ (int)instrs[bciPtr] ] ++;
        it_oofreq[ it_lastopc ][ (int)instrs[bciPtr] ] ++;
        it_lastopc = (int)instrs[bciPtr];
#endif

        bci = BCO_NEXT;
        /* We use the high 8 bits for flags, only the highest of which is
         * currently allocated */
        ASSERT((bci & 0xFF00) == (bci & 0x8000));

        switch (bci & 0xFF) {

        /* check for a breakpoint on the beginning of a let binding */
        case bci_BRK_FUN:
        {
            int arg1_brk_array, arg2_array_index, arg3_freeVars;
            StgArrWords *breakPoints;
            int returning_from_break;  // are we resuming execution from a breakpoint?
                                       //  if yes, then don't break this time around
            StgClosure *ioAction;      // the io action to run at a breakpoint

            StgAP_STACK *new_aps;      // a closure to save the top stack frame on the heap
            int i;
            int size_words;

            arg1_brk_array   = BCO_NEXT;  // 1st arg of break instruction
            arg2_array_index = BCO_NEXT;  // 2nd arg of break instruction
            arg3_freeVars    = BCO_NEXT;  // 3rd arg of break instruction

            // check if we are returning from a breakpoint - this info
            // is stored in the flags field of the current TSO
            returning_from_break = cap->r.rCurrentTSO->flags & TSO_STOPPED_ON_BREAKPOINT;

            // if we are returning from a break then skip this section
            // and continue executing
            if (!returning_from_break)
            {
                breakPoints = (StgArrWords *) BCO_PTR(arg1_brk_array);

                // stop the current thread if either the
                // "rts_stop_next_breakpoint" flag is true OR if the
                // breakpoint flag for this particular expression is
                // true
                if (rts_stop_next_breakpoint == rtsTrue ||
                    breakPoints->payload[arg2_array_index] == rtsTrue)
                {
                    // make sure we don't automatically stop at the
                    // next breakpoint
                    rts_stop_next_breakpoint = rtsFalse;

                    // allocate memory for a new AP_STACK, enough to
                    // store the top stack frame plus an
                    // stg_apply_interp_info pointer and a pointer to
                    // the BCO
                    size_words = BCO_BITMAP_SIZE(obj) + 2;
                    new_aps = (StgAP_STACK *) allocate (AP_STACK_sizeW(size_words));
                    SET_HDR(new_aps,&stg_AP_STACK_info,CCS_SYSTEM);
                    new_aps->size = size_words;
                    new_aps->fun = &stg_dummy_ret_closure;

                    // fill in the payload of the AP_STACK
                    new_aps->payload[0] = (StgClosure *)&stg_apply_interp_info;
                    new_aps->payload[1] = (StgClosure *)obj;

                    // copy the contents of the top stack frame into the AP_STACK
                    for (i = 2; i < size_words; i++)
                    {
                        new_aps->payload[i] = (StgClosure *)Sp[i-2];
                    }

                    // prepare the stack so that we can call the
                    // rts_breakpoint_io_action and ensure that the stack is
                    // in a reasonable state for the GC and so that
                    // execution of this BCO can continue when we resume
                    ioAction = (StgClosure *) deRefStablePtr (rts_breakpoint_io_action);
                    Sp -= 9;
                    Sp[8] = (W_)obj;
                    Sp[7] = (W_)&stg_apply_interp_info;
                    Sp[6] = (W_)&stg_noforceIO_info;     // see [unreg] below
                    Sp[5] = (W_)new_aps;                 // the AP_STACK
                    Sp[4] = (W_)BCO_PTR(arg3_freeVars);  // the info about local vars of the breakpoint
                    Sp[3] = (W_)False_closure;           // True <=> a breakpoint
                    Sp[2] = (W_)&stg_ap_pppv_info;
                    Sp[1] = (W_)ioAction;                // apply the IO action to its two arguments above
                    Sp[0] = (W_)&stg_enter_info;         // get ready to run the IO action
                    // Note [unreg]: in unregisterised mode, the return
                    // convention for IO is different.  The
                    // stg_noforceIO_info stack frame is necessary to
                    // account for this difference.

                    // set the flag in the TSO to say that we are now
                    // stopping at a breakpoint so that when we resume
                    // we don't stop on the same breakpoint that we
                    // already stopped at just now
                    cap->r.rCurrentTSO->flags |= TSO_STOPPED_ON_BREAKPOINT;

                    // stop this thread and return to the scheduler -
                    // eventually we will come back and the IO action on
                    // the top of the stack will be executed
                    RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
                }
            }
            // record that this thread is not stopped at a breakpoint anymore
            cap->r.rCurrentTSO->flags &= ~TSO_STOPPED_ON_BREAKPOINT;

            // continue normal execution of the byte code instructions
            goto nextInsn;
        }

        case bci_STKCHECK: {
            // Explicit stack check at the beginning of a function
            // *only* (stack checks in case alternatives are
            // propagated to the enclosing function).
            StgWord stk_words_reqd = BCO_GET_LARGE_ARG + 1;
            if (Sp - stk_words_reqd < SpLim) {
                Sp -= 2;
                Sp[1] = (W_)obj;
                Sp[0] = (W_)&stg_apply_interp_info;
                RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
            } else {
                goto nextInsn;
            }
        }

        case bci_PUSH_L: {
            int o1 = BCO_NEXT;
            Sp[-1] = Sp[o1];
            Sp--;
            goto nextInsn;
        }

        case bci_PUSH_LL: {
            int o1 = BCO_NEXT;
            int o2 = BCO_NEXT;
            Sp[-1] = Sp[o1];
            Sp[-2] = Sp[o2];
            Sp -= 2;
            goto nextInsn;
        }

        case bci_PUSH_LLL: {
            int o1 = BCO_NEXT;
            int o2 = BCO_NEXT;
            int o3 = BCO_NEXT;
            Sp[-1] = Sp[o1];
            Sp[-2] = Sp[o2];
            Sp[-3] = Sp[o3];
            Sp -= 3;
            goto nextInsn;
        }

        case bci_PUSH_G: {
            int o1 = BCO_NEXT;
            Sp[-1] = BCO_PTR(o1);
            Sp -= 1;
            goto nextInsn;
        }

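        // The PUSH_ALTS family pushes a "compiled code to interpreter"
        // return frame: the stg_ctoi_* info table describes the
        // representation of the value that will be returned, and the
        // word above it is the case-continuation BCO.  When a value is
        // returned to such a frame we re-enter the interpreter at
        // run_BCO_return / run_BCO_return_unboxed above.
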
        case bci_PUSH_ALTS: {
            int o_bco  = BCO_NEXT;
            Sp[-2] = (W_)&stg_ctoi_R1p_info;
            Sp[-1] = BCO_PTR(o_bco);
            Sp -= 2;
            goto nextInsn;
        }

        case bci_PUSH_ALTS_P: {
            int o_bco  = BCO_NEXT;
            Sp[-2] = (W_)&stg_ctoi_R1unpt_info;
            Sp[-1] = BCO_PTR(o_bco);
            Sp -= 2;
            goto nextInsn;
        }

        case bci_PUSH_ALTS_N: {
            int o_bco  = BCO_NEXT;
            Sp[-2] = (W_)&stg_ctoi_R1n_info;
            Sp[-1] = BCO_PTR(o_bco);
            Sp -= 2;
            goto nextInsn;
        }

        case bci_PUSH_ALTS_F: {
            int o_bco  = BCO_NEXT;
            Sp[-2] = (W_)&stg_ctoi_F1_info;
            Sp[-1] = BCO_PTR(o_bco);
            Sp -= 2;
            goto nextInsn;
        }

        case bci_PUSH_ALTS_D: {
            int o_bco  = BCO_NEXT;
            Sp[-2] = (W_)&stg_ctoi_D1_info;
            Sp[-1] = BCO_PTR(o_bco);
            Sp -= 2;
            goto nextInsn;
        }

        case bci_PUSH_ALTS_L: {
            int o_bco  = BCO_NEXT;
            Sp[-2] = (W_)&stg_ctoi_L1_info;
            Sp[-1] = BCO_PTR(o_bco);
            Sp -= 2;
            goto nextInsn;
        }

        case bci_PUSH_ALTS_V: {
            int o_bco  = BCO_NEXT;
            Sp[-2] = (W_)&stg_ctoi_V_info;
            Sp[-1] = BCO_PTR(o_bco);
            Sp -= 2;
            goto nextInsn;
        }

        case bci_PUSH_APPLY_N:
            Sp--; Sp[0] = (W_)&stg_ap_n_info;
            goto nextInsn;
        case bci_PUSH_APPLY_V:
            Sp--; Sp[0] = (W_)&stg_ap_v_info;
            goto nextInsn;
        case bci_PUSH_APPLY_F:
            Sp--; Sp[0] = (W_)&stg_ap_f_info;
            goto nextInsn;
        case bci_PUSH_APPLY_D:
            Sp--; Sp[0] = (W_)&stg_ap_d_info;
            goto nextInsn;
        case bci_PUSH_APPLY_L:
            Sp--; Sp[0] = (W_)&stg_ap_l_info;
            goto nextInsn;
        case bci_PUSH_APPLY_P:
            Sp--; Sp[0] = (W_)&stg_ap_p_info;
            goto nextInsn;
        case bci_PUSH_APPLY_PP:
            Sp--; Sp[0] = (W_)&stg_ap_pp_info;
            goto nextInsn;
        case bci_PUSH_APPLY_PPP:
            Sp--; Sp[0] = (W_)&stg_ap_ppp_info;
            goto nextInsn;
        case bci_PUSH_APPLY_PPPP:
            Sp--; Sp[0] = (W_)&stg_ap_pppp_info;
            goto nextInsn;
        case bci_PUSH_APPLY_PPPPP:
            Sp--; Sp[0] = (W_)&stg_ap_ppppp_info;
            goto nextInsn;
        case bci_PUSH_APPLY_PPPPPP:
            Sp--; Sp[0] = (W_)&stg_ap_pppppp_info;
            goto nextInsn;

        case bci_PUSH_UBX: {
            int i;
            int o_lits = BCO_NEXT;
            int n_words = BCO_NEXT;
            Sp -= n_words;
            for (i = 0; i < n_words; i++) {
                Sp[i] = (W_)BCO_LIT(o_lits+i);
            }
            goto nextInsn;
        }

        case bci_SLIDE: {
            int n  = BCO_NEXT;
            int by = BCO_NEXT;
            /* a_1, .. a_n, b_1, .. b_by, s => a_1, .. a_n, s */
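            /* e.g. SLIDE 2 3 with x, y on top of the stack:
                  x, y, b_1, b_2, b_3, rest...  =>  x, y, rest...  */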
            while(--n >= 0) {
                Sp[n+by] = Sp[n];
            }
            Sp += by;
            INTERP_TICK(it_slides);
            goto nextInsn;
        }

        case bci_ALLOC_AP: {
            StgAP* ap;
            int n_payload = BCO_NEXT;
            ap = (StgAP*)allocate(AP_sizeW(n_payload));
            Sp[-1] = (W_)ap;
            ap->n_args = n_payload;
            SET_HDR(ap, &stg_AP_info, CCS_SYSTEM/*ToDo*/)
            Sp --;
            goto nextInsn;
        }

        case bci_ALLOC_AP_NOUPD: {
            StgAP* ap;
            int n_payload = BCO_NEXT;
            ap = (StgAP*)allocate(AP_sizeW(n_payload));
            Sp[-1] = (W_)ap;
            ap->n_args = n_payload;
            SET_HDR(ap, &stg_AP_NOUPD_info, CCS_SYSTEM/*ToDo*/)
            Sp --;
            goto nextInsn;
        }

        case bci_ALLOC_PAP: {
            StgPAP* pap;
            int arity = BCO_NEXT;
            int n_payload = BCO_NEXT;
            pap = (StgPAP*)allocate(PAP_sizeW(n_payload));
            Sp[-1] = (W_)pap;
            pap->n_args = n_payload;
            pap->arity = arity;
            SET_HDR(pap, &stg_PAP_info, CCS_SYSTEM/*ToDo*/)
            Sp --;
            goto nextInsn;
        }

        case bci_MKAP: {
            int i;
            int stkoff = BCO_NEXT;
            int n_payload = BCO_NEXT;
            StgAP* ap = (StgAP*)Sp[stkoff];
            ASSERT((int)ap->n_args == n_payload);
            ap->fun = (StgClosure*)Sp[0];

            // The function should be a BCO, and its bitmap should
            // cover the payload of the AP correctly.
            ASSERT(get_itbl(ap->fun)->type == BCO
                   && BCO_BITMAP_SIZE(ap->fun) == ap->n_args);

            for (i = 0; i < n_payload; i++)
                ap->payload[i] = (StgClosure*)Sp[i+1];
            Sp += n_payload+1;
            IF_DEBUG(interpreter,
                     debugBelch("\tBuilt ");
                     printObj((StgClosure*)ap);
                    );
            goto nextInsn;
        }

        case bci_MKPAP: {
            int i;
            int stkoff = BCO_NEXT;
            int n_payload = BCO_NEXT;
            StgPAP* pap = (StgPAP*)Sp[stkoff];
            ASSERT((int)pap->n_args == n_payload);
            pap->fun = (StgClosure*)Sp[0];

            // The function should be a BCO
            ASSERT(get_itbl(pap->fun)->type == BCO);

            for (i = 0; i < n_payload; i++)
                pap->payload[i] = (StgClosure*)Sp[i+1];
            Sp += n_payload+1;
            IF_DEBUG(interpreter,
                     debugBelch("\tBuilt ");
                     printObj((StgClosure*)pap);
                    );
            goto nextInsn;
        }

        case bci_UNPACK: {
            /* Unpack N ptr words from t.o.s constructor */
            int i;
            int n_words = BCO_NEXT;
            StgClosure* con = (StgClosure*)Sp[0];
            Sp -= n_words;
            for (i = 0; i < n_words; i++) {
                Sp[i] = (W_)con->payload[i];
            }
            goto nextInsn;
        }

        case bci_PACK: {
            int i;
            int o_itbl         = BCO_NEXT;
            int n_words        = BCO_NEXT;
            StgInfoTable* itbl = INFO_PTR_TO_STRUCT(BCO_LIT(o_itbl));
            int request        = CONSTR_sizeW( itbl->layout.payload.ptrs,
                                               itbl->layout.payload.nptrs );
            StgClosure* con = (StgClosure*)allocate_NONUPD(request);
            ASSERT( itbl->layout.payload.ptrs + itbl->layout.payload.nptrs > 0);
            SET_HDR(con, (StgInfoTable*)BCO_LIT(o_itbl), CCS_SYSTEM/*ToDo*/);
            for (i = 0; i < n_words; i++) {
                con->payload[i] = (StgClosure*)Sp[i];
            }
            Sp += n_words;
            Sp --;
            Sp[0] = (W_)con;
            IF_DEBUG(interpreter,
                     debugBelch("\tBuilt ");
                     printObj((StgClosure*)con);
                    );
            goto nextInsn;
        }

        case bci_TESTLT_P: {
            unsigned int discr = BCO_NEXT;
            int failto = BCO_NEXT;
            StgClosure* con = (StgClosure*)Sp[0];
            if (GET_TAG(con) >= discr) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        case bci_TESTEQ_P: {
            unsigned int discr = BCO_NEXT;
            int failto = BCO_NEXT;
            StgClosure* con = (StgClosure*)Sp[0];
            if (GET_TAG(con) != discr) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        case bci_TESTLT_I: {
            // There should be an Int at Sp[1], and an info table at Sp[0].
            int discr   = BCO_NEXT;
            int failto  = BCO_NEXT;
            I_ stackInt = (I_)Sp[1];
            if (stackInt >= (I_)BCO_LIT(discr))
                bciPtr = failto;
            goto nextInsn;
        }

        case bci_TESTEQ_I: {
            // There should be an Int at Sp[1], and an info table at Sp[0].
            int discr   = BCO_NEXT;
            int failto  = BCO_NEXT;
            I_ stackInt = (I_)Sp[1];
            if (stackInt != (I_)BCO_LIT(discr)) {
                bciPtr = failto;
            }
            goto nextInsn;
        }
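
        // The TEST* instructions implement case alternatives: each one
        // falls through to the code for its alternative when the test
        // succeeds, and jumps to 'failto' otherwise.  For example, the
        // bytecode generated for
        //
        //     case x of { 1# -> e1; _ -> e2 }
        //
        // is roughly  TESTEQ_I <1#> <lbl>; <code for e1>; ... <lbl>: <code for e2>
        // (a sketch of the shape of the generated code, not its exact form).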

        case bci_TESTLT_D: {
            // There should be a Double at Sp[1], and an info table at Sp[0].
            int discr   = BCO_NEXT;
            int failto  = BCO_NEXT;
            StgDouble stackDbl, discrDbl;
            stackDbl = PK_DBL( & Sp[1] );
            discrDbl = PK_DBL( & BCO_LIT(discr) );
            if (stackDbl >= discrDbl) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        case bci_TESTEQ_D: {
            // There should be a Double at Sp[1], and an info table at Sp[0].
            int discr   = BCO_NEXT;
            int failto  = BCO_NEXT;
            StgDouble stackDbl, discrDbl;
            stackDbl = PK_DBL( & Sp[1] );
            discrDbl = PK_DBL( & BCO_LIT(discr) );
            if (stackDbl != discrDbl) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        case bci_TESTLT_F: {
            // There should be a Float at Sp[1], and an info table at Sp[0].
            int discr   = BCO_NEXT;
            int failto  = BCO_NEXT;
            StgFloat stackFlt, discrFlt;
            stackFlt = PK_FLT( & Sp[1] );
            discrFlt = PK_FLT( & BCO_LIT(discr) );
            if (stackFlt >= discrFlt) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        case bci_TESTEQ_F: {
            // There should be a Float at Sp[1], and an info table at Sp[0].
            int discr   = BCO_NEXT;
            int failto  = BCO_NEXT;
            StgFloat stackFlt, discrFlt;
            stackFlt = PK_FLT( & Sp[1] );
            discrFlt = PK_FLT( & BCO_LIT(discr) );
            if (stackFlt != discrFlt) {
                bciPtr = failto;
            }
            goto nextInsn;
        }

        // Control-flow ish things
        case bci_ENTER:
            // Context-switch check.  We put it here to ensure that
            // the interpreter has done at least *some* work before
            // context switching: sometimes the scheduler can invoke
            // the interpreter with context_switch == 1, particularly
            // if the -C0 flag has been given on the cmd line.
            if (context_switch) {
                Sp--; Sp[0] = (W_)&stg_enter_info;
                RETURN_TO_SCHEDULER(ThreadInterpret, ThreadYielding);
            }
            goto eval;

        case bci_RETURN:
            tagged_obj = (StgClosure *)Sp[0];
            Sp++;
            goto do_return;

        case bci_RETURN_P:
            Sp--;
            Sp[0] = (W_)&stg_gc_unpt_r1_info;
            goto do_return_unboxed;
        case bci_RETURN_N:
            Sp--;
            Sp[0] = (W_)&stg_gc_unbx_r1_info;
            goto do_return_unboxed;
        case bci_RETURN_F:
            Sp--;
            Sp[0] = (W_)&stg_gc_f1_info;
            goto do_return_unboxed;
        case bci_RETURN_D:
            Sp--;
            Sp[0] = (W_)&stg_gc_d1_info;
            goto do_return_unboxed;
        case bci_RETURN_L:
            Sp--;
            Sp[0] = (W_)&stg_gc_l1_info;
            goto do_return_unboxed;
        case bci_RETURN_V:
            Sp--;
            Sp[0] = (W_)&stg_gc_void_info;
            goto do_return_unboxed;

        case bci_SWIZZLE: {
            int stkoff = BCO_NEXT;
            signed short n = (signed short)(BCO_NEXT);
            Sp[stkoff] += (W_)n;
            goto nextInsn;
        }

        case bci_CCALL: {
            void *tok;
            int stk_offset            = BCO_NEXT;
            int o_itbl                = BCO_NEXT;
            void(*marshall_fn)(void*) = (void (*)(void*))BCO_LIT(o_itbl);
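            // (marshall_fn is a small piece of code produced by the
            // bytecode generator for this particular ccall; it is
            // handed a pointer to the block of words holding the
            // call's arguments and result slot, and performs the
            // actual foreign call -- see the two uses below.)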
            int ret_dyn_size =
                RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE
                + sizeofW(StgRetDyn);

#ifdef THREADED_RTS
            // Threaded RTS:
            // Arguments on the TSO stack are not good, because garbage
            // collection might move the TSO as soon as we call
            // suspendThread below.

            W_ arguments[stk_offset];

            memcpy(arguments, Sp, sizeof(W_) * stk_offset);
#endif

            // Restore the Haskell thread's current value of errno
            errno = cap->r.rCurrentTSO->saved_errno;

            // There are a bunch of non-ptr words on the stack (the
            // ccall args, the ccall fun address and space for the
            // result), which we need to cover with an info table
            // since we might GC during this call.
            //
            // We know how many (non-ptr) words there are before the
            // next valid stack frame: it is the stk_offset arg to the
            // CCALL instruction.  So we build a RET_DYN stack frame
            // on the stack to describe this chunk of stack.
            //
            Sp -= ret_dyn_size;
            ((StgRetDyn *)Sp)->liveness = R1_PTR | N_NONPTRS(stk_offset);
            ((StgRetDyn *)Sp)->info = (StgInfoTable *)&stg_gc_gen_info;

            // save obj (pointer to the current BCO), since this
            // might move during the call.  We use the R1 slot in the
            // RET_DYN frame for this, hence R1_PTR above.
            ((StgRetDyn *)Sp)->payload[0] = (StgClosure *)obj;

            SAVE_STACK_POINTERS;
            tok = suspendThread(&cap->r);

#ifndef THREADED_RTS
            // Careful:
            // suspendThread might have shifted the stack
            // around (stack squeezing), so we have to grab the real
            // Sp out of the TSO to find the ccall args again.

            marshall_fn ( (void*)(cap->r.rCurrentTSO->sp + ret_dyn_size) );
#else
            // Threaded RTS:
            // We already made a copy of the arguments above.

            marshall_fn ( arguments );
#endif

            // And restart the thread again, popping the RET_DYN frame.
            cap = (Capability *)((void *)((unsigned char*)resumeThread(tok) - sizeof(StgFunTable)));
            LOAD_STACK_POINTERS;

            // Re-load the pointer to the BCO from the RET_DYN frame,
            // it might have moved during the call.  Also reload the
            // pointers to the components of the BCO.
            obj      = ((StgRetDyn *)Sp)->payload[0];
            bco      = (StgBCO*)obj;
            instrs   = (StgWord16*)(bco->instrs->payload);
            literals = (StgWord*)(&bco->literals->payload[0]);
            ptrs     = (StgPtr*)(&bco->ptrs->payload[0]);

            Sp += ret_dyn_size;

            // Save the Haskell thread's current value of errno
            cap->r.rCurrentTSO->saved_errno = errno;

#ifdef THREADED_RTS
            // Threaded RTS:
            // Copy the "arguments", which might include a return value,
            // back to the TSO stack.  It would of course be enough to
            // just copy the return value, but we don't know the offset.
            memcpy(Sp, arguments, sizeof(W_) * stk_offset);
#endif

            goto nextInsn;
        }

        case bci_JMP: {
            /* BCO_NEXT modifies bciPtr, so be conservative. */
            int nextpc = BCO_NEXT;
            bciPtr     = nextpc;
            goto nextInsn;
        }

        case bci_CASEFAIL:
            barf("interpretBCO: hit a CASEFAIL");

        // Errors
        default:
            barf("interpretBCO: unknown or unimplemented opcode %d",
                 (int)(bci & 0xFF));

        } /* switch on opcode */
    }
    }

    barf("interpretBCO: fell off end of the interpreter");
}