1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2011
4 *
5 * STG-to-C glue.
6 *
7 * To run an STG function from C land, call
8 *
9 * rv = StgRun(f,BaseReg);
10 *
11 * where "f" is the STG function to call, and BaseReg is the address of the
12 * RegTable for this run (we might have separate RegTables if we're running
13 * multiple threads on an SMP machine).
14 *
15 * In the end, "f" must JMP to StgReturn (defined below), passing the
16 * return value "rv" in R1, so that the caller of StgRun receives "rv" in
17 * whatever way C returns a value.
18 *
19 * NOTE: StgRun/StgReturn do *NOT* load or store Hp or any other registers
20 * (other than saving the C callee-saves registers). Instead, the called
21 * function "f" must do that in STG land.
22 *
23 * We also initially make sure that there are @RESERVED_C_STACK_BYTES@ on the
24 * C-stack. This is done to reserve some space for the allocation of
25 * temporaries in STG code.
26 *
27 * -------------------------------------------------------------------------- */
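
/* A sketch of how the scheduler drives this (simplified; the real call lives
 * in rts/Schedule.c): given a Capability "cap" whose register table is cap->r,
 * the run loop enters STG land with something like
 *
 *     StgRegTable *r = StgRun((StgFunPtr)stg_returnToStackTop, &cap->r);
 *
 * and gets the register table back, with the Haskell result in R1, once the
 * STG code eventually jumps to StgReturn.
 */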
28
29 #include "PosixSource.h"
30 #include "ghcconfig.h"
31
32 #if defined(sparc_HOST_ARCH) || defined(USE_MINIINTERPRETER)
33 /* include Stg.h first because we want real machine regs in here: we
34 * have to get the value of R1 back from Stg land to C land intact.
35 */
36
37 /* We include windows.h very early, as on Win64 the CONTEXT type has
38 fields "R8", "R9" and "R10", which clash badly if we've already
39 #define'd those names for our own purposes (in stg/Regs.h). */
40 #if defined(HAVE_WINDOWS_H)
41 #include <windows.h>
42 #endif
43
44 #define IN_STGCRUN 1
45 #include "Stg.h"
46 #include "Rts.h"
47 #else
48 /* The other architectures do not require the actual register macro definitions
49 * here because they use hand written assembly to implement the StgRun
50 * function. Including Stg.h first will define the R1 values using GCC specific
51 * techniques, which we don't want for LLVM based C compilers. Since we don't
52 * actually need the real machine register definitions here, we include the
53 * headers in the opposite order to allow LLVM-based C compilers to work.
54 */
55 #include "Rts.h"
56 #include "Stg.h"
57 #endif
58
59 #include "StgRun.h"
60 #include "Capability.h"
61
62 #include "RtsUtils.h"
63 #if defined(DEBUG)
64 #include "Printer.h"
65 #endif
66
67 #if defined(USE_MINIINTERPRETER)
68
69 /* -----------------------------------------------------------------------------
70 any architecture (using miniinterpreter)
71 -------------------------------------------------------------------------- */
72
73 StgRegTable * StgRun(StgFunPtr f, StgRegTable *basereg STG_UNUSED)
74 {
75 while (f) {
76 IF_DEBUG(interpreter,
77 debugBelch("Jumping to ");
78 printPtr((P_)f); fflush(stdout);
79 debugBelch("\n");
80 );
81 f = (StgFunPtr) (f)();
82 }
83 return (StgRegTable *)R1.p;
84 }
85
86 StgFunPtr StgReturn(void)
87 {
88 return 0;
89 }
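
/* Under the miniinterpreter every STG "tail call" is just a C return of the
 * next function to run; the while loop in StgRun above keeps calling until a
 * function (ultimately StgReturn) hands back NULL. A sketch, with a
 * hypothetical closure name, of what such a function looks like in this scheme:
 *
 *     StgFunPtr some_closure_entry(void)
 *     {
 *         ...                            // do the closure's work
 *         return (StgFunPtr)next_entry;  // "jump" to the next STG function
 *     }
 */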
90
91 #else /* !USE_MINIINTERPRETER */
92
93 #if defined(mingw32_HOST_OS)
94 /*
95 * Note [Windows Stack allocations]
96 *
97 * On Windows the stack has to be grown one 4k page at a time, otherwise
98 * we get a segfault. The C compiler knows how to do this (it calls
99 * _alloca()), so we make sure that we can allocate as much stack as
100 * we need. However, since this is a local stack allocation whose value
101 * isn't used outside the frame, compilers are free to optimize the allocation
102 * and the corresponding stack check away. To prevent that we request that
103 * this function never be optimized (see #14669). */
104 STG_NO_OPTIMIZE StgWord8 *win32AllocStack(void)
105 {
106 StgWord8 stack[RESERVED_C_STACK_BYTES + 16 + 12];
107 return stack;
108 }
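
/* For intuition, a minimal sketch (not the code the compiler actually emits)
 * of the page-at-a-time probing that the _alloca()-style stack check performs
 * for the local array above: touching one byte per 4k page, in order, commits
 * each guard page before the reserved area is used.
 *
 *     // hypothetical helper, for illustration only
 *     static void probeStack(StgWord8 *stack, StgWord size)
 *     {
 *         for (StgWord i = 0; i < size; i += 4096) {
 *             ((volatile StgWord8 *)stack)[i] = 0;  // touch one byte per page
 *         }
 *     }
 */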
109 #endif
110
111 /* -----------------------------------------------------------------------------
112 x86 architecture
113 -------------------------------------------------------------------------- */
114
115 #if defined(i386_HOST_ARCH)
116
117 #if defined(darwin_HOST_OS) || defined(ios_HOST_OS)
118 #define STG_GLOBAL ".globl "
119 #define STG_HIDDEN ".private_extern "
120 #else
121 #define STG_GLOBAL ".global "
122 #define STG_HIDDEN ".hidden "
123 #endif
124
125 /*
126 * Note [Stack Alignment on X86]
127 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
128 *
129 * On X86 (both 32-bit and 64-bit) we keep the stack aligned to a 16-byte
130 * boundary across function calls. We do this because several ABIs require
131 * it (x64, Mac OS X 32-bit/64-bit) and because it is needed when interfacing
132 * with other libraries through the FFI.
133 *
134 * As part of this arrangement we must maintain the stack at a 16-byte boundary
135 * minus word_size bytes (so 16n - 4 for i386 and 16n - 8 for x64) on entry to a
136 * procedure, since both GCC and LLVM expect this. This is because the stack
137 * should have been aligned to a 16-byte boundary and then a call made, which
138 * pushes a return address onto the stack (using word_size more space). In STG
139 * code we only jump to other STG procedures, so we maintain the 16n - word_size
140 * alignment for these jumps.
141 *
142 * This gives us binary compatibility with LLVM and GCC as well as dealing
143 * with the FFI. Previously we just maintained a 16n-byte alignment for
144 * procedure entry and calls, which led to bugs (see #4211 and #5250).
145 *
146 * To change this convention you need to change the code here, and in
147 * compiler/nativeGen/X86/CodeGen.hs::GenCCall, and maybe the adjustor
148 * code for thunks in rts/AdjustorAsm.s, rts/Adjustor.c.
149 *
150 * A quick way to see if this is wrong is to compile this code:
151 *
152 * main = System.Exit.exitWith ExitSuccess
153 *
154 * And run it with +RTS -sstderr. The stats code in the RTS, in
155 * particular statsPrintf(), relies on the stack alignment because
156 * it saves the %xmm regs on the stack, so it'll fall over if the
157 * stack isn't aligned, and calling exitWith from Haskell invokes
158 * shutdownHaskellAndExit using a C call.
159 *
160 * If you edit the sequence below be sure to update the unwinding information
161 * for stg_stop_thread in StgStartup.cmm.
162 */
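
/* A quick way to state the invariant above (hypothetical assertion, not code
 * that exists in the RTS): on entry to an STG procedure the C stack pointer
 * sp should satisfy
 *
 *     ASSERT(((StgWord)sp & 15) == 16 - sizeof(void *));
 *
 * i.e. sp == 16n - 4 on i386 and sp == 16n - 8 on x86-64.
 */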
163
164 static void GNUC3_ATTRIBUTE(used)
165 StgRunIsImplementedInAssembler(void)
166 {
167 __asm__ volatile (
168 STG_GLOBAL STG_RUN "\n"
169 #if !defined(mingw32_HOST_OS)
170 STG_HIDDEN STG_RUN "\n"
171 #endif
172 STG_RUN ":\n\t"
173
174 /*
175 * move %esp down to reserve an area for temporary storage
176 * during the execution of STG code.
177 *
178 * The stack pointer has to be aligned to a multiple of 16
179 * bytes from here - this is a requirement of the C ABI, so
180 * that C code can assign SSE2 registers directly to/from
181 * stack locations.
182 */
183 "subl %0, %%esp\n\t"
184
185 /*
186 * save callee-saves registers on behalf of the STG code.
187 */
188 "movl %%esp, %%eax\n\t"
189 "addl %0-16, %%eax\n\t"
190 "movl %%ebx,0(%%eax)\n\t"
191 "movl %%esi,4(%%eax)\n\t"
192 "movl %%edi,8(%%eax)\n\t"
193 "movl %%ebp,12(%%eax)\n\t"
194 /*
195 * Set BaseReg
196 */
197 "movl 24(%%eax),%%ebx\n\t"
198 /*
199 * grab the function argument from the stack
200 */
201 "movl 20(%%eax),%%eax\n\t"
202 /*
203 * jump to it
204 */
205 "jmp *%%eax\n\t"
206
207 STG_GLOBAL STG_RETURN "\n"
208 STG_RETURN ":\n\t"
209
210 "movl %%esi, %%eax\n\t" /* Return value in R1 */
211
212 /*
213 * restore callee-saves registers. (Don't stomp on %%eax!)
214 */
215 "movl %%esp, %%edx\n\t"
216 "addl %0-16, %%edx\n\t"
217 "movl 0(%%edx),%%ebx\n\t" /* restore the registers saved above */
218 "movl 4(%%edx),%%esi\n\t"
219 "movl 8(%%edx),%%edi\n\t"
220 "movl 12(%%edx),%%ebp\n\t"
221
222 "addl %0, %%esp\n\t"
223 "ret"
224
225 : : "i" (RESERVED_C_STACK_BYTES + 16)
226 // + 16 to make room for the 4 registers we have to save
227 // See Note [Stack Alignment on X86]
228 );
229 }
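
/* A reading aid for the code above (derived from the instructions themselves,
 * not a separate data structure): after the "subl" the C stack looks like
 * this, with %eax = %esp + RESERVED_C_STACK_BYTES:
 *
 *     24(%eax)  BaseReg argument        (old %esp + 8)
 *     20(%eax)  f argument              (old %esp + 4)
 *     16(%eax)  return address          (old %esp)
 *     12(%eax)  saved %ebp
 *      8(%eax)  saved %edi
 *      4(%eax)  saved %esi
 *      0(%eax)  saved %ebx
 *      ...      RESERVED_C_STACK_BYTES of STG temporary space
 *      0(%esp)
 */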
230
231 #endif // defined(i386_HOST_ARCH)
232
233 /* ----------------------------------------------------------------------------
234 x86-64 is almost the same as plain x86.
235
236 I've done it entirely in inline assembler, because I couldn't
237 get gcc to generate the correct subtraction from %rsp by using
238 the local array variable trick. It didn't seem to reserve
239 enough space. Oh well, it's not much harder this way.
240 ------------------------------------------------------------------------- */
241
242 #if defined(x86_64_HOST_ARCH)
243
244 #define STG_GLOBAL ".globl "
245
246 #if defined(darwin_HOST_OS) || defined(ios_HOST_OS)
247 #define STG_HIDDEN ".private_extern "
248 #else
249 #define STG_HIDDEN ".hidden "
250 #endif
251
252 /*
253 Note [Unwinding foreign exports on x86-64]
254 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
255 For foreign exports, that is, Haskell functions exported as C functions,
256 unwinding has to proceed from Haskell code back into C code. The current
257 story is as follows:
258
259 * The Haskell stack always has an stg_stop_thread_info frame at the bottom.
260 * We annotate stg_stop_thread_info to unwind the instruction pointer to a
261 label inside StgRun called StgRunJmp. It's the last instruction before the
262 code jumps into Haskell.
263 * StgRun, which is implemented in assembler, is annotated with some manual
264 unwinding information. It unwinds all the registers that it has saved
265 on the stack. This is important as rsp and rbp are often required for
266 getting to the next frame, and the rest of the saved registers are useful
267 when inspecting locals in gdb.
268
269
270 Example x86-64 stack for an FFI call
271 from C into a Haskell function:
272
273
274 HASKELL HEAP
275 "ADDRESS SPACE"
276
277 +--------------------+ <------ rbp
278 | |
279 | |
280 | |
281 | |
282 | Haskell |
283 | evaluation stack |
284 | |
285 | |
286 |--------------------|
287 |stg_catch_frame_info|
288 |--------------------|
289 | stg_forceIO_info |
290 |--------------------|
291 |stg_stop_thread_info| -------
292 +--------------------+ |
293 ... |
294 (other heap objects) |
295 ... |
296 |
297 |
298 |
299 C STACK "ADDRESS SPACE" |
300 v
301 +-----------------------------+ <------ rsp
302 | |
303 | RESERVED_C_STACK_BYTES ~16k |
304 | |
305 |-----------------------------|
306 | rbx ||
307 |-----------------------------| \
308 | rbp | |
309 |-----------------------------| \
310 | r12 | |
311 |-----------------------------| \
312 | r13 | | STG_RUN_STACK_FRAME_SIZE
313 |-----------------------------| /
314 | r14 | |
315 |-----------------------------| /
316 | r15 | |
317 |-----------------------------|/
318 | rip saved by call StgRun |
319 | in schedule() |
320 +-----------------------------+
321 ...
322 schedule() stack frame
323
324
325 Lower addresses are at the top
326
327 One little snag in this approach is that the annotations accepted by the
328 assembler are surprisingly unexpressive, so I had to resort to a .cfi_escape
329 and hand-assemble a DWARF expression. What made it worse is that big numbers
330 are LEB128 encoded, so their length in bytes varies with their magnitude
331 (see the sketch just after this Note). I took a shortcut and assumed the
332 magnitude of the relevant constant; it changes rarely, so that's not a big burden.
333
334 Here's an example stack generated this way:
335
336 Thread 1 "m" hit Breakpoint 1, Fib_zdfstableZZC0ZZCmainZZCFibZZCfib1_info () at Fib.hs:9
337 9 fib a = return (a + 1)
338 #0 Fib_zdfstableZZC0ZZCmainZZCFibZZCfib1_info () at Fib.hs:9
339 #1 stg_catch_frame_info () at rts/Exception.cmm:372
340 #2 stg_forceIO_info () at rts/StgStartup.cmm:178
341 #3 stg_stop_thread_info () at rts/StgStartup.cmm:42
342 #4 0x00000000007048ab in StgRunIsImplementedInAssembler () at rts/StgCRun.c:255
343 #5 0x00000000006fcf42 in schedule (initialCapability=initialCapability@entry=0x8adac0 <MainCapability>, task=task@entry=0x8cf2a0) at rts/Schedule.c:451
344 #6 0x00000000006fe18e in scheduleWaitThread (tso=0x4200006388, ret=<optimized out>, pcap=0x7fffffffdac0) at rts/Schedule.c:2533
345 #7 0x000000000040a21e in hs_fib ()
346 #8 0x000000000040a083 in main (argc=1, argv=0x7fffffffdc48) at m.cpp:15
347
348 (This is from patched gdb. See Note [Info Offset].)
349
350 The previous approach was to encode the unwinding information for selected
351 registers in stg_stop_thread_info with Cmm annotations. The unfortunate thing
352 about that approach was that it required the introduction of an artificial
353 MachSp register that wasn't meaningful outside unwinding. I discovered that to
354 get stack unwinding working under the -threaded runtime I also needed to unwind
355 rbp, which would have required adding MachRbp. If we wanted to see saved locals
356 in gdb, we'd have to add more. The core of the problem is that Cmm is
357 architecture-independent, while unwinding isn't.
358
359 Note [Unwinding foreign imports]
360 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
361 For unwinding foreign imports, that is, C functions exposed as Haskell functions,
362 no special handling is required. The C function unwinds according to the rip
363 saved on the stack by the call instruction. Then we perform regular Haskell
364 stack unwinding.
365 */
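
/* A sketch of how the RSP_DELTA constant used below is encoded as a
 * three-byte signed LEB128 number (7 payload bits per byte, continuation bit
 * 0x80 on every byte except the last); this mirrors the %c9/%c10/%c11
 * operands fed to the .cfi_escape directive:
 *
 *     // illustration only; the real bytes come from the "i" asm operands below
 *     byte1 = ( delta        & 0x7f) | 0x80;
 *     byte2 = ((delta >>  7) & 0x7f) | 0x80;
 *     byte3 = ( delta >> 14) & 0x7f;   // clear high bit ends the number
 *
 * The #error check on RSP_DELTA below makes sure the constant really does
 * fit this fixed three-byte form.
 */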
366
367
368 static void GNUC3_ATTRIBUTE(used)
369 StgRunIsImplementedInAssembler(void)
370 {
371 __asm__ volatile (
372 /*
373 * save callee-saves registers on behalf of the STG code.
374 */
375 STG_GLOBAL STG_RUN "\n"
376 #if !defined(mingw32_HOST_OS)
377 STG_HIDDEN STG_RUN "\n"
378 #endif
379 STG_RUN ":\n\t"
380 "subq %1, %%rsp\n\t"
381 "movq %%rsp, %%rax\n\t"
382 "subq %0, %%rsp\n\t"
383 "movq %%rbx,0(%%rax)\n\t"
384 "movq %%rbp,8(%%rax)\n\t"
385 "movq %%r12,16(%%rax)\n\t"
386 "movq %%r13,24(%%rax)\n\t"
387 "movq %%r14,32(%%rax)\n\t"
388 "movq %%r15,40(%%rax)\n\t"
389 #if defined(mingw32_HOST_OS)
390 /*
391 * Additional callee saved registers on Win64. This must match
392 * callClobberedRegisters in compiler/nativeGen/X86/Regs.hs as
393 * both represent the Win64 calling convention.
394 */
395 "movq %%rdi,48(%%rax)\n\t"
396 "movq %%rsi,56(%%rax)\n\t"
397 "movq %%xmm6, 64(%%rax)\n\t"
398 "movq %%xmm7, 72(%%rax)\n\t"
399 "movq %%xmm8, 80(%%rax)\n\t"
400 "movq %%xmm9, 88(%%rax)\n\t"
401 "movq %%xmm10, 96(%%rax)\n\t"
402 "movq %%xmm11,104(%%rax)\n\t"
403 "movq %%xmm12,112(%%rax)\n\t"
404 "movq %%xmm13,120(%%rax)\n\t"
405 "movq %%xmm14,128(%%rax)\n\t"
406 "movq %%xmm15,136(%%rax)\n\t"
407 #endif
408
409 /*
410 * Let the unwinder know where we saved the registers
411 * See Note [Unwinding foreign exports on x86-64].
412 */
413 ".cfi_def_cfa rsp, 0\n\t"
414 ".cfi_offset rbx, %c2\n\t"
415 ".cfi_offset rbp, %c3\n\t"
416 ".cfi_offset r12, %c4\n\t"
417 ".cfi_offset r13, %c5\n\t"
418 ".cfi_offset r14, %c6\n\t"
419 ".cfi_offset r15, %c7\n\t"
420 ".cfi_offset rip, %c8\n\t"
421 ".cfi_escape " // DW_CFA_val_expression is not expressible otherwise
422 "0x16, " // DW_CFA_val_expression
423 "0x07, " // register num 7 - rsp
424 "0x04, " // block length
425 "0x77, " // DW_OP_breg7 - signed LEB128 offset from rsp
426 "%c9, " // signed LEB128 encoded delta - byte 1
427 "%c10, " // signed LEB128 encoded delta - byte 2
428 "%c11\n\t" // signed LEB128 encoded delta - byte 2
429
430 /*
431 * Set BaseReg
432 */
433 #if defined(mingw32_HOST_OS)
434 "movq %%rdx,%%r13\n\t"
435 #else
436 "movq %%rsi,%%r13\n\t"
437 #endif
438 /*
439 * grab the function argument (passed in the first argument register) and jump to it.
440 */
441 #if defined(mingw32_HOST_OS)
442 "movq %%rcx,%%rax\n\t"
443 #else
444 "movq %%rdi,%%rax\n\t"
445 #endif
446
447 STG_GLOBAL xstr(STG_RUN_JMP) "\n"
448 #if !defined(mingw32_HOST_OS)
449 STG_HIDDEN xstr(STG_RUN_JMP) "\n"
450 #endif
451 xstr(STG_RUN_JMP) ":\n\t"
452 "jmp *%%rax\n\t"
453
454 ".globl " STG_RETURN "\n"
455 STG_RETURN ":\n\t"
456
457 "movq %%rbx, %%rax\n\t" /* Return value in R1 */
458
459 /*
460 * restore callee-saves registers. (Don't stomp on %%rax!)
461 */
462 "addq %0, %%rsp\n\t"
463 "movq 0(%%rsp),%%rbx\n\t" /* restore the registers saved above */
464 "movq 8(%%rsp),%%rbp\n\t"
465 "movq 16(%%rsp),%%r12\n\t"
466 "movq 24(%%rsp),%%r13\n\t"
467 "movq 32(%%rsp),%%r14\n\t"
468 "movq 40(%%rsp),%%r15\n\t"
469 #if defined(mingw32_HOST_OS)
470 "movq 48(%%rsp),%%rdi\n\t"
471 "movq 56(%%rsp),%%rsi\n\t"
472 "movq 64(%%rsp),%%xmm6\n\t"
473 "movq 72(%%rax),%%xmm7\n\t"
474 "movq 80(%%rax),%%xmm8\n\t"
475 "movq 88(%%rax),%%xmm9\n\t"
476 "movq 96(%%rax),%%xmm10\n\t"
477 "movq 104(%%rax),%%xmm11\n\t"
478 "movq 112(%%rax),%%xmm12\n\t"
479 "movq 120(%%rax),%%xmm13\n\t"
480 "movq 128(%%rax),%%xmm14\n\t"
481 "movq 136(%%rax),%%xmm15\n\t"
482 #endif
483 "addq %1, %%rsp\n\t"
484 "retq"
485
486 :
487 : "i"(RESERVED_C_STACK_BYTES),
488 "i"(STG_RUN_STACK_FRAME_SIZE /* stack frame size */),
489 "i"(RESERVED_C_STACK_BYTES /* rbx relative to cfa (rsp) */),
490 "i"(RESERVED_C_STACK_BYTES + 8 /* rbp relative to cfa (rsp) */),
491 "i"(RESERVED_C_STACK_BYTES + 16 /* r12 relative to cfa (rsp) */),
492 "i"(RESERVED_C_STACK_BYTES + 24 /* r13 relative to cfa (rsp) */),
493 "i"(RESERVED_C_STACK_BYTES + 32 /* r14 relative to cfa (rsp) */),
494 "i"(RESERVED_C_STACK_BYTES + 40 /* r15 relative to cfa (rsp) */),
495 "i"(RESERVED_C_STACK_BYTES + STG_RUN_STACK_FRAME_SIZE
496 /* rip relative to cfa */),
497 #define RSP_DELTA (RESERVED_C_STACK_BYTES + STG_RUN_STACK_FRAME_SIZE + 8)
498 #if (RSP_DELTA < (1<<14)) || (RSP_DELTA >= (1<<21))
499 #error "RSP_DELTA signed LEB128 encoding isn't 3 bytes"
500 #endif
501 "i"((RSP_DELTA & 127) | 128
502 /* signed LEB128-encoded delta from rsp - byte 1 */),
503 "i"(((RSP_DELTA >> 7) & 127) | 128
504 /* signed LEB128-encoded delta from rsp - byte 2 */),
505 "i"((RSP_DELTA >> 14) & 127
506 /* signed LEB128-encoded delta from rsp - byte 3 */)
507 #undef RSP_DELTA
508 );
509 /*
510 * See Note [Stack Alignment on X86]
511 */
512 }
513
514 #endif /* x86-64 */
515
516 /* -----------------------------------------------------------------------------
517 Sparc architecture
518
519 --
520 OLD COMMENT from GHC-3.02:
521
522 We want tailjumps to be calls, because `call xxx' is the only Sparc
523 branch that allows an arbitrary label as a target. (Gcc's ``goto
524 *target'' construct ends up loading the label into a register and
525 then jumping, at the cost of two extra instructions for the 32-bit
526 load.)
527
528 When entering the threaded world, we stash our return address in a
529 known location so that \tr{%i7} is available as an extra
530 callee-saves register. Of course, we have to restore this when
531 coming out of the threaded world.
532
533 I hate this god-forsaken architecture. Since the top of the
534 reserved stack space is used for globals and the bottom is reserved
535 for outgoing arguments, we have to stick our return address
536 somewhere in the middle. Currently, I'm allowing 100 extra
537 outgoing arguments beyond the first 6. --JSM
538
539 Updated info (GHC 4.06): we don't appear to use %i7 any more, so
540 I'm not sure whether we still need to save it. Incidentally, what
541 does the last paragraph above mean when it says "the top of the
542 stack is used for globals"? What globals? --SDM
543
544 Updated info (GHC 4.08.2): not saving %i7 any more (see below).
545 -------------------------------------------------------------------------- */
546
547 #if defined(sparc_HOST_ARCH)
548
549 StgRegTable *
550 StgRun(StgFunPtr f, StgRegTable *basereg) {
551
552 unsigned char space[RESERVED_C_STACK_BYTES];
553 #if 0
554 register void *i7 __asm__("%i7");
555 ((void **)(space))[100] = i7;
556 #endif
557 f();
558 __asm__ volatile (
559 ".align 4\n"
560 ".global " STG_RETURN "\n"
561 STG_RETURN ":"
562 : : "p" (space) : "l0","l1","l2","l3","l4","l5","l6","l7");
563 /* we tell the C compiler that l0-l7 are clobbered on return to
564 * StgReturn, otherwise it tries to use these to save eg. the
565 * address of space[100] across the call. The correct thing
566 * to do would be to save all the callee-saves regs, but we
567 * can't be bothered to do that.
568 *
569 * We also explicitly mark space as used since gcc eliminates it
570 * otherwise.
571 *
572 * The code that gcc generates for this little fragment is now
573 * terrible. We could do much better by coding it directly in
574 * assembler.
575 */
576 #if 0
577 /* updated 4.08.2: we don't save %i7 in the middle of the reserved
578 * space any more, since gcc tries to save its address across the
579 * call to f(), this gets clobbered in STG land and we end up
580 * dereferencing a bogus pointer in StgReturn.
581 */
582 __asm__ volatile ("ld %1,%0"
583 : "=r" (i7) : "m" (((void **)(space))[100]));
584 #endif
585 return (StgRegTable *)R1.i;
586 }
587
588 #endif
589
590 /* -----------------------------------------------------------------------------
591 PowerPC architecture
592
593 Everything is in assembler, so we don't have to deal with GCC...
594 -------------------------------------------------------------------------- */
595
596 #if defined(powerpc_HOST_ARCH)
597
598 #define STG_GLOBAL ".globl "
599
600 #if defined(darwin_HOST_OS)
601 #define STG_HIDDEN ".private_extern "
602 #else
603 #define STG_HIDDEN ".hidden "
604 #endif
605
606 #if defined(aix_HOST_OS)
607
608 // implementation is in StgCRunAsm.S
609
610 #elif defined(darwin_HOST_OS)
611 void StgRunIsImplementedInAssembler(void)
612 {
613 #if HAVE_SUBSECTIONS_VIA_SYMBOLS
614 // if the toolchain supports dead-stripping, we have to
615 // prevent it here (the dead-stripper tends to get confused by this code).
616 __asm__ volatile (".no_dead_strip _StgRunIsImplementedInAssembler\n");
617 #endif
618 __asm__ volatile (
619 STG_GLOBAL STG_RUN "\n"
620 STG_HIDDEN STG_RUN "\n"
621 STG_RUN ":\n"
622 "\tmflr r0\n"
623 "\tbl saveFP # f14\n"
624 "\tstmw r13,-220(r1)\n"
625 "\tstwu r1,-%0(r1)\n"
626 "\tmr r27,r4\n" // BaseReg == r27
627 "\tmtctr r3\n"
628 "\tmr r12,r3\n"
629 "\tbctr\n"
630 ".globl _StgReturn\n"
631 "_StgReturn:\n"
632 "\tmr r3,r14\n"
633 "\tla r1,%0(r1)\n"
634 "\tlmw r13,-220(r1)\n"
635 "\tb restFP # f14\n"
636 : : "i"(RESERVED_C_STACK_BYTES+224 /*stack frame size*/));
637 }
638 #else
639
640 // This version is for PowerPC Linux.
641
642 // Differences from the Darwin/Mac OS X version:
643 // *) Different Assembler Syntax
644 // *) Doesn't use Register Saving Helper Functions (although they exist somewhere)
645 // *) We may not access positive stack offsets
646 // (no "Red Zone" as in the Darwin ABI)
647 // *) The Link Register is saved to a different offset in the caller's stack frame
648 // (Linux: 4(r1), Darwin 8(r1))
649
650 static void GNUC3_ATTRIBUTE(used)
651 StgRunIsImplementedInAssembler(void)
652 {
653 __asm__ volatile (
654 "\t.globl StgRun\n"
655 "\t.hidden StgRun\n"
656 "\t.type StgRun,@function\n"
657 "StgRun:\n"
658 "\tmflr 0\n"
659 "\tstw 0,4(1)\n"
660 "\tmr 5,1\n"
661 "\tstwu 1,-%0(1)\n"
662 "\tstmw 13,-220(5)\n"
663 "\tstfd 14,-144(5)\n"
664 "\tstfd 15,-136(5)\n"
665 "\tstfd 16,-128(5)\n"
666 "\tstfd 17,-120(5)\n"
667 "\tstfd 18,-112(5)\n"
668 "\tstfd 19,-104(5)\n"
669 "\tstfd 20,-96(5)\n"
670 "\tstfd 21,-88(5)\n"
671 "\tstfd 22,-80(5)\n"
672 "\tstfd 23,-72(5)\n"
673 "\tstfd 24,-64(5)\n"
674 "\tstfd 25,-56(5)\n"
675 "\tstfd 26,-48(5)\n"
676 "\tstfd 27,-40(5)\n"
677 "\tstfd 28,-32(5)\n"
678 "\tstfd 29,-24(5)\n"
679 "\tstfd 30,-16(5)\n"
680 "\tstfd 31,-8(5)\n"
681 "\tmr 27,4\n" // BaseReg == r27
682 "\tmtctr 3\n"
683 "\tmr 12,3\n"
684 "\tbctr\n"
685 ".globl StgReturn\n"
686 "\t.type StgReturn,@function\n"
687 "StgReturn:\n"
688 "\tmr 3,14\n"
689 "\tla 5,%0(1)\n"
690 "\tlmw 13,-220(5)\n"
691 "\tlfd 14,-144(5)\n"
692 "\tlfd 15,-136(5)\n"
693 "\tlfd 16,-128(5)\n"
694 "\tlfd 17,-120(5)\n"
695 "\tlfd 18,-112(5)\n"
696 "\tlfd 19,-104(5)\n"
697 "\tlfd 20,-96(5)\n"
698 "\tlfd 21,-88(5)\n"
699 "\tlfd 22,-80(5)\n"
700 "\tlfd 23,-72(5)\n"
701 "\tlfd 24,-64(5)\n"
702 "\tlfd 25,-56(5)\n"
703 "\tlfd 26,-48(5)\n"
704 "\tlfd 27,-40(5)\n"
705 "\tlfd 28,-32(5)\n"
706 "\tlfd 29,-24(5)\n"
707 "\tlfd 30,-16(5)\n"
708 "\tlfd 31,-8(5)\n"
709 "\tmr 1,5\n"
710 "\tlwz 0,4(1)\n"
711 "\tmtlr 0\n"
712 "\tblr\n"
713 : : "i"(RESERVED_C_STACK_BYTES+224 /*stack frame size*/));
714 }
715 #endif
716
717 #endif
718
719 /* -----------------------------------------------------------------------------
720 PowerPC 64 architecture
721
722 Everything is in assembler, so we don't have to deal with GCC...
723 -------------------------------------------------------------------------- */
724
725 #if defined(powerpc64_HOST_ARCH)
726
727 #if defined(linux_HOST_OS)
728 static void GNUC3_ATTRIBUTE(used)
729 StgRunIsImplementedInAssembler(void)
730 {
731 // r0 volatile
732 // r1 stack pointer
733 // r2 toc - needs to be saved
734 // r3-r10 argument passing, volatile
735 // r11, r12 very volatile (not saved across cross-module calls)
736 // r13 thread local state (never modified, don't need to save)
737 // r14-r31 callee-save
738 __asm__ volatile (
739 ".section \".opd\",\"aw\"\n"
740 ".align 3\n"
741 ".globl StgRun\n"
742 ".hidden StgRun\n"
743 "StgRun:\n"
744 "\t.quad\t.StgRun,.TOC.@tocbase,0\n"
745 "\t.size StgRun,24\n"
746 ".globl StgReturn\n"
747 "StgReturn:\n"
748 "\t.quad\t.StgReturn,.TOC.@tocbase,0\n"
749 "\t.size StgReturn,24\n"
750 ".previous\n"
751 ".globl .StgRun\n"
752 ".type .StgRun,@function\n"
753 ".StgRun:\n"
754 "\tmflr 0\n"
755 "\tmr 5, 1\n"
756 "\tstd 0, 16(1)\n"
757 "\tstdu 1, -%0(1)\n"
758 "\tstd 2, -296(5)\n"
759 "\tstd 14, -288(5)\n"
760 "\tstd 15, -280(5)\n"
761 "\tstd 16, -272(5)\n"
762 "\tstd 17, -264(5)\n"
763 "\tstd 18, -256(5)\n"
764 "\tstd 19, -248(5)\n"
765 "\tstd 20, -240(5)\n"
766 "\tstd 21, -232(5)\n"
767 "\tstd 22, -224(5)\n"
768 "\tstd 23, -216(5)\n"
769 "\tstd 24, -208(5)\n"
770 "\tstd 25, -200(5)\n"
771 "\tstd 26, -192(5)\n"
772 "\tstd 27, -184(5)\n"
773 "\tstd 28, -176(5)\n"
774 "\tstd 29, -168(5)\n"
775 "\tstd 30, -160(5)\n"
776 "\tstd 31, -152(5)\n"
777 "\tstfd 14, -144(5)\n"
778 "\tstfd 15, -136(5)\n"
779 "\tstfd 16, -128(5)\n"
780 "\tstfd 17, -120(5)\n"
781 "\tstfd 18, -112(5)\n"
782 "\tstfd 19, -104(5)\n"
783 "\tstfd 20, -96(5)\n"
784 "\tstfd 21, -88(5)\n"
785 "\tstfd 22, -80(5)\n"
786 "\tstfd 23, -72(5)\n"
787 "\tstfd 24, -64(5)\n"
788 "\tstfd 25, -56(5)\n"
789 "\tstfd 26, -48(5)\n"
790 "\tstfd 27, -40(5)\n"
791 "\tstfd 28, -32(5)\n"
792 "\tstfd 29, -24(5)\n"
793 "\tstfd 30, -16(5)\n"
794 "\tstfd 31, -8(5)\n"
795 "\tmr 27, 4\n" // BaseReg == r27
796 "\tld 2, 8(3)\n"
797 "\tld 3, 0(3)\n"
798 "\tmtctr 3\n"
799 "\tbctr\n"
800 ".globl .StgReturn\n"
801 ".type .StgReturn,@function\n"
802 ".StgReturn:\n"
803 "\tmr 3,14\n"
804 "\tla 5, %0(1)\n" // load address == addi r5, r1, %0
805 "\tld 2, -296(5)\n"
806 "\tld 14, -288(5)\n"
807 "\tld 15, -280(5)\n"
808 "\tld 16, -272(5)\n"
809 "\tld 17, -264(5)\n"
810 "\tld 18, -256(5)\n"
811 "\tld 19, -248(5)\n"
812 "\tld 20, -240(5)\n"
813 "\tld 21, -232(5)\n"
814 "\tld 22, -224(5)\n"
815 "\tld 23, -216(5)\n"
816 "\tld 24, -208(5)\n"
817 "\tld 25, -200(5)\n"
818 "\tld 26, -192(5)\n"
819 "\tld 27, -184(5)\n"
820 "\tld 28, -176(5)\n"
821 "\tld 29, -168(5)\n"
822 "\tld 30, -160(5)\n"
823 "\tld 31, -152(5)\n"
824 "\tlfd 14, -144(5)\n"
825 "\tlfd 15, -136(5)\n"
826 "\tlfd 16, -128(5)\n"
827 "\tlfd 17, -120(5)\n"
828 "\tlfd 18, -112(5)\n"
829 "\tlfd 19, -104(5)\n"
830 "\tlfd 20, -96(5)\n"
831 "\tlfd 21, -88(5)\n"
832 "\tlfd 22, -80(5)\n"
833 "\tlfd 23, -72(5)\n"
834 "\tlfd 24, -64(5)\n"
835 "\tlfd 25, -56(5)\n"
836 "\tlfd 26, -48(5)\n"
837 "\tlfd 27, -40(5)\n"
838 "\tlfd 28, -32(5)\n"
839 "\tlfd 29, -24(5)\n"
840 "\tlfd 30, -16(5)\n"
841 "\tlfd 31, -8(5)\n"
842 "\tmr 1, 5\n"
843 "\tld 0, 16(1)\n"
844 "\tmtlr 0\n"
845 "\tblr\n"
846 : : "i"(RESERVED_C_STACK_BYTES+304 /*stack frame size*/));
847 }
848
849 #else // linux_HOST_OS
850 #error Only Linux is supported for powerpc64 right now.
851 #endif
852
853 #endif
854
855 #if defined(powerpc64le_HOST_ARCH)
856 /* -----------------------------------------------------------------------------
857 PowerPC 64 little endian architecture
858
859 Really everything is in assembler, so we don't have to deal with GCC...
860 -------------------------------------------------------------------------- */
861 #endif
862
863 /* -----------------------------------------------------------------------------
864 ARM architecture
865 -------------------------------------------------------------------------- */
866
867 #if defined(arm_HOST_ARCH)
868
869 #if defined(__thumb__)
870 #define THUMB_FUNC ".thumb\n\t.thumb_func\n\t"
871 #else
872 #define THUMB_FUNC
873 #endif
874
875 StgRegTable *
876 StgRun(StgFunPtr f, StgRegTable *basereg) {
877 StgRegTable * r;
878 __asm__ volatile (
879 /*
880 * save callee-saves registers on behalf of the STG code.
881 */
882 "stmfd sp!, {r4-r11, ip, lr}\n\t"
883 #if !defined(arm_HOST_ARCH_PRE_ARMv6)
884 "vstmdb sp!, {d8-d11}\n\t"
885 #endif
886 /*
887 * allocate some space for Stg machine's temporary storage.
888 * Note: RESERVED_C_STACK_BYTES has to be a round number here or
889 * the assembler can't assemble it.
890 */
891 "sub sp, sp, %3\n\t"
892 /*
893 * Set BaseReg
894 */
895 "mov r4, %2\n\t"
896 /*
897 * Jump to function argument.
898 */
899 "bx %1\n\t"
900
901 ".globl " STG_RETURN "\n\t"
902 THUMB_FUNC
903 #if !defined(ios_HOST_OS)
904 ".type " STG_RETURN ", %%function\n"
905 #endif
906 STG_RETURN ":\n\t"
907 /*
908 * Free the space we allocated
909 */
910 "add sp, sp, %3\n\t"
911 /*
912 * Return the new register table, taking it from Stg's R1 (ARM's R7).
913 */
914 "mov %0, r7\n\t"
915 /*
916 * restore callee-saves registers.
917 */
918 #if !defined(arm_HOST_ARCH_PRE_ARMv6)
919 "vldmia sp!, {d8-d11}\n\t"
920 #endif
921 "ldmfd sp!, {r4-r11, ip, lr}\n\t"
922 : "=r" (r)
923 : "r" (f), "r" (basereg), "i" (RESERVED_C_STACK_BYTES)
924 #if !defined(__thumb__)
925 /* In ARM mode, r11/fp is the frame pointer and so we cannot mark
926 it as clobbered. If we do, GCC reports an error. */
927 : "%r4", "%r5", "%r6", "%r7", "%r8", "%r9", "%r10", "%ip", "%lr"
928 #else
929 /* In Thumb mode r7 is the frame pointer, so we cannot mark it
930 as clobbered. On the other hand, we also mark as clobbered
931 those regs not used in Thumb mode. It is hard to judge whether this
932 is needed, but Haskell code certainly uses them for
933 placing GHC's virtual registers there. See
934 includes/stg/MachRegs.h. Note that Haskell code is
935 compiled by GHC/LLVM into ARM code (not Thumb!), at least
936 as of February 2012. */
937 : "%r4", "%r5", "%r6", "%r8", "%r9", "%r10", "%r11", "%ip", "%lr"
938 #endif
939 );
940 return r;
941 }
942 #endif
943
944 #if defined(aarch64_HOST_ARCH)
945
946 StgRegTable *
947 StgRun(StgFunPtr f, StgRegTable *basereg) {
948 StgRegTable * r;
949 __asm__ volatile (
950 /*
951 * Save callee-saves registers on behalf of the STG code.
952 * Floating point registers only need the bottom 64 bits preserved.
953 * We need to use the names x16, x17, x29 and x30 instead of ip0,
954 * ip1, fp and lr because one of clang and gcc doesn't understand
955 * the latter names.
956 */
957 "stp x29, x30, [sp, #-16]!\n\t"
958 "mov x29, sp\n\t"
959 "stp x16, x17, [sp, #-16]!\n\t"
960 "stp x19, x20, [sp, #-16]!\n\t"
961 "stp x21, x22, [sp, #-16]!\n\t"
962 "stp x23, x24, [sp, #-16]!\n\t"
963 "stp x25, x26, [sp, #-16]!\n\t"
964 "stp x27, x28, [sp, #-16]!\n\t"
965 "stp d8, d9, [sp, #-16]!\n\t"
966 "stp d10, d11, [sp, #-16]!\n\t"
967 "stp d12, d13, [sp, #-16]!\n\t"
968 "stp d14, d15, [sp, #-16]!\n\t"
969
970 /*
971 * allocate some space for Stg machine's temporary storage.
972 * Note: RESERVED_C_STACK_BYTES has to be a round number here or
973 * the assembler can't assemble it.
974 */
975 "sub sp, sp, %3\n\t"
976 /*
977 * Set BaseReg
978 */
979 "mov x19, %2\n\t"
980 /*
981 * Jump to function argument.
982 */
983 "br %1\n\t"
984
985 ".globl " STG_RETURN "\n\t"
986 #if !defined(ios_HOST_OS)
987 ".type " STG_RETURN ", %%function\n"
988 #endif
989 STG_RETURN ":\n\t"
990 /*
991 * Free the space we allocated
992 */
993 "add sp, sp, %3\n\t"
994 /*
995 * Return the new register table, taking it from Stg's R1 (ARM64's R22).
996 */
997 "mov %0, x22\n\t"
998 /*
999 * restore callee-saves registers.
1000 */
1001
1002 "ldp d14, d15, [sp], #16\n\t"
1003 "ldp d12, d13, [sp], #16\n\t"
1004 "ldp d10, d11, [sp], #16\n\t"
1005 "ldp d8, d9, [sp], #16\n\t"
1006 "ldp x27, x28, [sp], #16\n\t"
1007 "ldp x25, x26, [sp], #16\n\t"
1008 "ldp x23, x24, [sp], #16\n\t"
1009 "ldp x21, x22, [sp], #16\n\t"
1010 "ldp x19, x20, [sp], #16\n\t"
1011 "ldp x16, x17, [sp], #16\n\t"
1012 "ldp x29, x30, [sp], #16\n\t"
1013
1014 : "=r" (r)
1015 : "r" (f), "r" (basereg), "i" (RESERVED_C_STACK_BYTES)
1016 : "%x19", "%x20", "%x21", "%x22", "%x23", "%x24", "%x25", "%x26", "%x27", "%x28",
1017 "%x16", "%x17", "%x30"
1018 );
1019 return r;
1020 }
1021
1022 #endif
1023
1024 #endif /* !USE_MINIINTERPRETER */