Restore Xmm registers properly in StgCRun.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2011
4 *
5 * STG-to-C glue.
6 *
7 * To run an STG function from C land, call
8 *
9 * rv = StgRun(f,BaseReg);
10 *
11 * where "f" is the STG function to call, and BaseReg is the address of the
12 * RegTable for this run (we might have separate RegTables if we're running
13 * multiple threads on an SMP machine).
14 *
15 * In the end, "f" must JMP to StgReturn (defined below), passing the
16 * return-value "rv" in R1, to return to the caller of StgRun, which returns
17 * "rv" in whatever way C returns a value.
18 *
19 * NOTE: StgRun/StgReturn do *NOT* load or store Hp or any other registers
20 * (other than saving the C callee-saves registers). Instead, the called
21 * function "f" must do that in STG land.
22 *
23 * We also initially make sure that there are @RESERVED_C_STACK_BYTES@ on the
24 * C-stack. This is done to reserve some space for the allocation of
25 * temporaries in STG code.
26 *
27 * -------------------------------------------------------------------------- */
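/*
 * A rough usage sketch (illustration only, kept out of the build): this is
 * roughly what schedule() in rts/Schedule.c does. The entry code passed as
 * "f" and the Capability named "cap" are stand-ins here; see the real call
 * sites for the exact closure used.
 */
#if 0
static StgRegTable *exampleRunHaskell(Capability *cap)
{
    /* Run STG code until it jumps to StgReturn; the register table comes
     * back as an ordinary C return value. */
    return StgRun((StgFunPtr)stg_returnToStackTop, &cap->r);
}
#endif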
28
29 #include "PosixSource.h"
30 #include "ghcconfig.h"
31
32 #if defined(sparc_HOST_ARCH) || defined(USE_MINIINTERPRETER)
33 /* include Stg.h first because we want real machine regs in here: we
34 * have to get the value of R1 back from Stg land to C land intact.
35 */
36
37 /* We include windows.h very early, as on Win64 the CONTEXT type has
38 fields "R8", "R9" and "R10", which breaks if we've already
39 #define'd those names for our own purposes (in stg/Regs.h) */
40 #if defined(HAVE_WINDOWS_H)
41 #include <windows.h>
42 #endif
43
44 #define IN_STGCRUN 1
45 #include "Stg.h"
46 #include "Rts.h"
47 #else
48 /* The other architectures do not require the actual register macro definitions
49 * here because they use hand-written assembly to implement the StgRun
50 * function. Including Stg.h first will define the R1 values using GCC-specific
51 * techniques, which we don't want for LLVM-based C compilers. Since we don't
52 * actually need the real machine register definitions here, we include the
53 * headers in the opposite order to allow LLVM-based C compilers to work.
54 */
55 #include "Rts.h"
56 #include "Stg.h"
57 #endif
58
59 #include "StgRun.h"
60 #include "Capability.h"
61
62 #include "RtsUtils.h"
63 #if defined(DEBUG)
64 #include "Printer.h"
65 #endif
66
67 #if defined(USE_MINIINTERPRETER)
68
69 /* -----------------------------------------------------------------------------
70 any architecture (using miniinterpreter)
71 -------------------------------------------------------------------------- */
72
73 StgRegTable * StgRun(StgFunPtr f, StgRegTable *basereg STG_UNUSED)
74 {
75 while (f) {
76 IF_DEBUG(interpreter,
77 debugBelch("Jumping to ");
78 printPtr((P_)f); fflush(stdout);
79 debugBelch("\n");
80 );
81 f = (StgFunPtr) (f)();
82 }
83 return (StgRegTable *)R1.p;
84 }
85
86 StgFunPtr StgReturn(void)
87 {
88 return 0;
89 }
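/*
 * Under the mini-interpreter there are no real tail calls: each STG
 * fragment returns the address of the next function to run, and the loop
 * in StgRun above bounces between them until StgReturn hands back 0.
 * A hypothetical fragment (illustration only, kept out of the build):
 */
#if 0
extern StgFunPtr stg_example_continuation(void);

static StgFunPtr stg_example_step(void)
{
    /* ... do one step of STG work using the register table ... */
    return (StgFunPtr)stg_example_continuation;  /* "jump" by returning */
}
#endif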
90
91 #else /* !USE_MINIINTERPRETER */
92
93 #if defined(mingw32_HOST_OS)
94 /*
95 * Note [Windows Stack allocations]
96 *
97 * On Windows the stack has to be allocated 4k at a time, otherwise
98 * we get a segfault. The C compiler knows how to do this (it calls
99 * _alloca()), so we make sure that we can allocate as much stack as
100 * we need. However, since we are doing a local stack allocation whose value
101 * isn't valid outside the frame, compilers are free to optimize the allocation
102 * and the corresponding stack check away. To prevent that, we request that
103 * this function never be optimized (see #14669). */
104 STG_NO_OPTIMIZE StgWord8 *win32AllocStack(void)
105 {
106 StgWord8 stack[RESERVED_C_STACK_BYTES + 16 + 12];
107 return stack;
108 }
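/*
 * An aside (illustration only, kept out of the build) on the effect the
 * note above relies on: any C frame larger than a page makes the compiler
 * emit stack-probe code (e.g. GCC's ___chkstk_ms on Win64), touching each
 * 4k page as the stack grows. The function below is purely hypothetical.
 */
#if 0
static void win32StackProbeIllustration(void)
{
    /* A local array bigger than 4k forces a probe of every page it spans. */
    volatile StgWord8 big[2 * 4096];
    big[0] = 0;
}
#endif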
109 #endif
110
111 /* -----------------------------------------------------------------------------
112 x86 architecture
113 -------------------------------------------------------------------------- */
114
115 #if defined(i386_HOST_ARCH)
116
117 #if defined(darwin_HOST_OS) || defined(ios_HOST_OS)
118 #define STG_GLOBAL ".globl "
119 #define STG_HIDDEN ".private_extern "
120 #else
121 #define STG_GLOBAL ".global "
122 #define STG_HIDDEN ".hidden "
123 #endif
124
125 /*
126 * Note [Stack Alignment on X86]
127 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
128 *
129 * On X86 (both 32bit and 64bit) we keep the stack aligned on function calls at
130 * a 16-byte boundary. This is done because on a number of architectures the
131 * ABI requires it (x64, Mac OS X 32bit/64bit), and because it is needed for
132 * interfacing with other libraries through the FFI.
133 *
134 * As part of this arrangement we must maintain the stack at a 16-byte boundary
135 * - word_size-bytes (so 16n - 4 for i386 and 16n - 8 for x64) on entry to a
136 * procedure since both GCC and LLVM expect this. This is because the stack
137 * should have been 16-byte boundary aligned and then a call made which pushes
138 * a return address onto the stack (so word_size more space used). In STG code
139 * we only jump to other STG procedures, so we maintain the 16n - word_size
140 * alignment for these jumps.
141 *
142 * This gives us binary compatibility with LLVM and GCC as well as dealing
143 * with the FFI. Previously we just maintained a 16n byte alignment for
144 * procedure entry and calls, which led to bugs (see #4211 and #5250).
145 *
146 * To change this convention you need to change the code here, and in
147 * compiler/nativeGen/X86/CodeGen.hs::GenCCall, and maybe the adjustor
148 * code for thunks in rts/AdjustorAsm.s, rts/Adjustor.c.
149 *
150 * A quick way to see if this is wrong is to compile this code:
151 *
152 * main = System.Exit.exitWith ExitSuccess
153 *
154 * And run it with +RTS -sstderr. The stats code in the RTS, in
155 * particular statsPrintf(), relies on the stack alignment because
156 * it saves the %xmm regs on the stack, so it'll fall over if the
157 * stack isn't aligned, and calling exitWith from Haskell invokes
158 * shutdownHaskellAndExit using a C call.
159 *
160 * If you edit the sequence below be sure to update the unwinding information
161 * for stg_stop_thread in StgStartup.cmm.
162 */
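/*
 * A compile-time sketch (illustration only, kept out of the build) of the
 * invariant implied by the note above: the amount StgRun moves %esp by,
 * RESERVED_C_STACK_BYTES + 16, must itself be a multiple of 16, otherwise
 * the 16n - word_size entry alignment would be lost.
 */
#if 0
typedef char stg_run_i386_adjustment_keeps_alignment
    [((RESERVED_C_STACK_BYTES + 16) % 16 == 0) ? 1 : -1];
#endif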
163
164 static void GNUC3_ATTRIBUTE(used)
165 StgRunIsImplementedInAssembler(void)
166 {
167 __asm__ volatile (
168 STG_GLOBAL STG_RUN "\n"
169 #if !defined(mingw32_HOST_OS)
170 STG_HIDDEN STG_RUN "\n"
171 #endif
172 STG_RUN ":\n\t"
173
174 /*
175 * move %esp down to reserve an area for temporary storage
176 * during the execution of STG code.
177 *
178 * The stack pointer has to be aligned to a multiple of 16
179 * bytes from here - this is a requirement of the C ABI, so
180 * that C code can assign SSE2 registers directly to/from
181 * stack locations.
182 */
183 "subl %0, %%esp\n\t"
184
185 /*
186 * save callee-saves registers on behalf of the STG code.
187 */
188 "movl %%esp, %%eax\n\t"
189 "addl %0-16, %%eax\n\t"
190 "movl %%ebx,0(%%eax)\n\t"
191 "movl %%esi,4(%%eax)\n\t"
192 "movl %%edi,8(%%eax)\n\t"
193 "movl %%ebp,12(%%eax)\n\t"
194 /*
195 * Set BaseReg
196 */
197 "movl 24(%%eax),%%ebx\n\t"
198 /*
199 * grab the function argument from the stack
200 */
201 "movl 20(%%eax),%%eax\n\t"
202 /*
203 * jump to it
204 */
205 "jmp *%%eax\n\t"
206
207 STG_GLOBAL STG_RETURN "\n"
208 STG_RETURN ":\n\t"
209
210 "movl %%esi, %%eax\n\t" /* Return value in R1 */
211
212 /*
213 * restore callee-saves registers. (Don't stomp on %%eax!)
214 */
215 "movl %%esp, %%edx\n\t"
216 "addl %0-16, %%edx\n\t"
217 "movl 0(%%edx),%%ebx\n\t" /* restore the registers saved above */
218 "movl 4(%%edx),%%esi\n\t"
219 "movl 8(%%edx),%%edi\n\t"
220 "movl 12(%%edx),%%ebp\n\t"
221
222 "addl %0, %%esp\n\t"
223 "ret"
224
225 : : "i" (RESERVED_C_STACK_BYTES + 16)
226 // + 16 to make room for the 4 registers we have to save
227 // See Note [Stack Alignment on X86]
228 );
229 }
230
231 #endif // defined(i386_HOST_ARCH)
232
233 /* ----------------------------------------------------------------------------
234 x86-64 is almost the same as plain x86.
235
236 I've done it using entirely inline assembler, because I couldn't
237 get gcc to generate the correct subtraction from %rsp by using
238 the local array variable trick. It didn't seem to reserve
239 enough space. Oh well, it's not much harder this way.
240 ------------------------------------------------------------------------- */
241
242 #if defined(x86_64_HOST_ARCH)
243
244 #define STG_GLOBAL ".globl "
245
246 #if defined(darwin_HOST_OS) || defined(ios_HOST_OS)
247 #define STG_HIDDEN ".private_extern "
248 #else
249 #define STG_HIDDEN ".hidden "
250 #endif
251
252 /*
253 Note [Unwinding foreign exports on x86-64]
254 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
255 For foreign exports, that is, Haskell functions exported as C functions, when
256 we unwind we have to unwind from Haskell code into C code. The current story
257 is as follows:
258
259 * The Haskell stack always has a stg_stop_thread_info frame at the bottom
260 * We annotate stg_stop_thread_info to unwind the instruction pointer to a
261 label inside StgRun called StgRunJmp. It's the last instruction before the
262 code jumps into Haskell.
263 * StgRun, which is implemented in assembler, is annotated with some manual
264 unwinding information. It unwinds all the registers that it has saved
265 on the stack. This is important as rsp and rbp are often required for
266 getting to the next frame and the rest of the saved registers are useful
267 when inspecting locals in gdb.
268
269
270 Example x86-64 stack for an FFI call
271 from C into a Haskell function:
272
273
274 HASKELL HEAP
275 "ADDRESS SPACE"
276
277 +--------------------+ <------ rbp
278 | |
279 | |
280 | |
281 | |
282 | Haskell |
283 | evaluation stack |
284 | |
285 | |
286 |--------------------|
287 |stg_catch_frame_info|
288 |--------------------|
289 | stg_forceIO_info |
290 |--------------------|
291 |stg_stop_thread_info| -------
292 +--------------------+ |
293 ... |
294 (other heap objects) |
295 ... |
296 |
297 |
298 |
299 C STACK "ADDRESS SPACE" |
300 v
301 +-----------------------------+ <------ rsp
302 | |
303 | RESERVED_C_STACK_BYTES ~16k |
304 | |
305 |-----------------------------|
306 | rbx ||
307 |-----------------------------| \
308 | rbp | |
309 |-----------------------------| \
310 | r12 | |
311 |-----------------------------| \
312 | r13 | | STG_RUN_STACK_FRAME_SIZE
313 |-----------------------------| /
314 | r14 | |
315 |-----------------------------| /
316 | r15 | |
317 |-----------------------------|/
318 | rip saved by call StgRun |
319 | in schedule() |
320 +-----------------------------+
321 ...
322 schedule() stack frame
323
324
325 Lower addresses on the top
326
327 One little snag in this approach is that the annotations accepted by the
328 assembler are surprisingly unexpressive. I had to resort to a .cfi_escape
329 and hand-assemble a DWARF expression. What made it worse was that big numbers
330 are LEB128 encoded, which makes them variable length, with the number of
331 bytes depending on the magnitude.
332
333 Here's an example stack generated this way:
334
335 Thread 1 "m" hit Breakpoint 1, Fib_zdfstableZZC0ZZCmainZZCFibZZCfib1_info () at Fib.hs:9
336 9 fib a = return (a + 1)
337 #0 Fib_zdfstableZZC0ZZCmainZZCFibZZCfib1_info () at Fib.hs:9
338 #1 stg_catch_frame_info () at rts/Exception.cmm:372
339 #2 stg_forceIO_info () at rts/StgStartup.cmm:178
340 #3 stg_stop_thread_info () at rts/StgStartup.cmm:42
341 #4 0x00000000007048ab in StgRunIsImplementedInAssembler () at rts/StgCRun.c:255
342 #5 0x00000000006fcf42 in schedule (initialCapability=initialCapability@entry=0x8adac0 <MainCapability>, task=task@entry=0x8cf2a0) at rts/Schedule.c:451
343 #6 0x00000000006fe18e in scheduleWaitThread (tso=0x4200006388, ret=<optimized out>, pcap=0x7fffffffdac0) at rts/Schedule.c:2533
344 #7 0x000000000040a21e in hs_fib ()
345 #8 0x000000000040a083 in main (argc=1, argv=0x7fffffffdc48) at m.cpp:15
346
347 (This is from patched gdb. See Note [Info Offset].)
348
349 The previous approach was to encode the unwinding information for select
350 registers in stg_stop_thread_info with Cmm annotations. The unfortunate thing
351 about that approach was that it required introduction of an artificial MachSp
352 register that wasn't meaningful outside unwinding. I discovered that to get
353 stack unwinding working under the -threaded runtime I also needed to unwind rbp,
354 which would require adding MachRbp. If we wanted to see saved locals in gdb,
355 we'd have to add more. The core of the problem is that Cmm is architecture
356 independent, while unwinding isn't.
357
358 Note [Unwinding foreign imports]
359 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
360 For unwinding foreign imports, that is, C functions exposed as Haskell functions,
361 no special handling is required. The C function unwinds according to the rip
362 saved on the stack by the call instruction. Then we perform regular Haskell
363 stack unwinding.
364 */
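/*
 * An illustrative helper (kept out of the build) showing the LEB128
 * encoding that the %c9..%c12 operands below spell out by hand: seven bits
 * per byte, least significant group first, with the high bit set on every
 * byte except the last.
 */
#if 0
static int encode_leb128_delta(StgWord delta, unsigned char *out)
{
    int n = 0;
    do {
        unsigned char byte = delta & 127;   /* low seven bits */
        delta >>= 7;
        if (delta > 0) byte |= 128;         /* continuation bit */
        out[n++] = byte;
    } while (delta > 0);
    return n;                               /* bytes emitted */
}
#endif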
365
366
367 static void GNUC3_ATTRIBUTE(used)
368 StgRunIsImplementedInAssembler(void)
369 {
370 __asm__ volatile (
371 /*
372 * save callee-saves registers on behalf of the STG code.
373 */
374 STG_GLOBAL STG_RUN "\n"
375 #if !defined(mingw32_HOST_OS)
376 STG_HIDDEN STG_RUN "\n"
377 #endif
378 STG_RUN ":\n\t"
379 "subq %1, %%rsp\n\t"
380 "movq %%rsp, %%rax\n\t"
381 "subq %0, %%rsp\n\t"
382 "movq %%rbx,0(%%rax)\n\t"
383 "movq %%rbp,8(%%rax)\n\t"
384 "movq %%r12,16(%%rax)\n\t"
385 "movq %%r13,24(%%rax)\n\t"
386 "movq %%r14,32(%%rax)\n\t"
387 "movq %%r15,40(%%rax)\n\t"
388 #if defined(mingw32_HOST_OS)
389 /*
390 * Additional callee saved registers on Win64. This must match
391 * callClobberedRegisters in compiler/nativeGen/X86/Regs.hs as
392 * both represent the Win64 calling convention.
393 */
394 "movq %%rdi,48(%%rax)\n\t"
395 "movq %%rsi,56(%%rax)\n\t"
396 "movq %%xmm6, 64(%%rax)\n\t"
397 "movq %%xmm7, 72(%%rax)\n\t"
398 "movq %%xmm8, 80(%%rax)\n\t"
399 "movq %%xmm9, 88(%%rax)\n\t"
400 "movq %%xmm10, 96(%%rax)\n\t"
401 "movq %%xmm11,104(%%rax)\n\t"
402 "movq %%xmm12,112(%%rax)\n\t"
403 "movq %%xmm13,120(%%rax)\n\t"
404 "movq %%xmm14,128(%%rax)\n\t"
405 "movq %%xmm15,136(%%rax)\n\t"
406 #endif
407
408 #if !defined(darwin_HOST_OS)
409 /*
410 * Let the unwinder know where we saved the registers
411 * See Note [Unwinding foreign exports on x86-64].
412 *
413 * N.B. We don't support unwinding on Darwin due to
414 * various toolchain insanity.
415 */
416 ".cfi_def_cfa rsp, 0\n\t"
417 ".cfi_offset rbx, %c2\n\t"
418 ".cfi_offset rbp, %c3\n\t"
419 ".cfi_offset r12, %c4\n\t"
420 ".cfi_offset r13, %c5\n\t"
421 ".cfi_offset r14, %c6\n\t"
422 ".cfi_offset r15, %c7\n\t"
423 ".cfi_offset rip, %c8\n\t"
424 ".cfi_escape " // DW_CFA_val_expression is not expressible otherwise
425 "0x16, " // DW_CFA_val_expression
426 "0x07, " // register num 7 - rsp
427 "0x04, " // block length
428 "0x77, " // DW_OP_breg7 - signed LEB128 offset from rsp
429 #define RSP_DELTA (RESERVED_C_STACK_BYTES + STG_RUN_STACK_FRAME_SIZE + 8)
430 "%c9" // signed LEB128 encoded delta - byte 1
431 #if (RSP_DELTA >> 7) > 0
432 ", %c10" // signed LEB128 encoded delta - byte 2
433 #endif
434
435 #if (RSP_DELTA >> 14) > 0
436 ", %c11" // signed LEB128 encoded delta - byte 3
437 #endif
438
439 #if (RSP_DELTA >> 21) > 0
440 ", %c12" // signed LEB128 encoded delta - byte 4
441 #endif
442
443 #if (RSP_DELTA >> 28) > 0
444 #error "RSP_DELTA too big"
445 #endif
446 "\n\t"
447 #endif /* !defined(darwin_HOST_OS) */
448
449 /*
450 * Set BaseReg
451 */
452 #if defined(mingw32_HOST_OS)
453 "movq %%rdx,%%r13\n\t"
454 #else
455 "movq %%rsi,%%r13\n\t"
456 #endif
457 /*
458 * grab the function argument from the stack, and jump to it.
459 */
460 #if defined(mingw32_HOST_OS)
461 "movq %%rcx,%%rax\n\t"
462 #else
463 "movq %%rdi,%%rax\n\t"
464 #endif
465
466 STG_GLOBAL xstr(STG_RUN_JMP) "\n"
467 #if !defined(mingw32_HOST_OS)
468 STG_HIDDEN xstr(STG_RUN_JMP) "\n"
469 #endif
470 #if HAVE_SUBSECTIONS_VIA_SYMBOLS
471 // If we have dead-stripping enabled and a label is detected as unused,
472 // the code gets nop'd out.
473 ".no_dead_strip " xstr(STG_RUN_JMP) "\n"
474 #endif
475 xstr(STG_RUN_JMP) ":\n\t"
476 "jmp *%%rax\n\t"
477
478 ".globl " STG_RETURN "\n"
479 STG_RETURN ":\n\t"
480
481 "movq %%rbx, %%rax\n\t" /* Return value in R1 */
482
483 /*
484 * restore callee-saves registers. (Don't stomp on %%rax!)
485 */
486 "addq %0, %%rsp\n\t"
487 "movq 0(%%rsp),%%rbx\n\t" /* restore the registers saved above */
488 "movq 8(%%rsp),%%rbp\n\t"
489 "movq 16(%%rsp),%%r12\n\t"
490 "movq 24(%%rsp),%%r13\n\t"
491 "movq 32(%%rsp),%%r14\n\t"
492 "movq 40(%%rsp),%%r15\n\t"
493 #if defined(mingw32_HOST_OS)
494 "movq 48(%%rsp),%%rdi\n\t"
495 "movq 56(%%rsp),%%rsi\n\t"
496 "movq 64(%%rsp),%%xmm6\n\t"
497 "movq 72(%%rsp),%%xmm7\n\t"
498 "movq 80(%%rsp),%%xmm8\n\t"
499 "movq 88(%%rsp),%%xmm9\n\t"
500 "movq 96(%%rsp),%%xmm10\n\t"
501 "movq 104(%%rsp),%%xmm11\n\t"
502 "movq 112(%%rsp),%%xmm12\n\t"
503 "movq 120(%%rsp),%%xmm13\n\t"
504 "movq 128(%%rsp),%%xmm14\n\t"
505 "movq 136(%%rsp),%%xmm15\n\t"
506 #endif
507 "addq %1, %%rsp\n\t"
508 "retq"
509
510 :
511 : "i"(RESERVED_C_STACK_BYTES),
512 "i"(STG_RUN_STACK_FRAME_SIZE /* stack frame size */),
513 "i"(RESERVED_C_STACK_BYTES /* rbx relative to cfa (rsp) */),
514 "i"(RESERVED_C_STACK_BYTES + 8 /* rbp relative to cfa (rsp) */),
515 "i"(RESERVED_C_STACK_BYTES + 16 /* r12 relative to cfa (rsp) */),
516 "i"(RESERVED_C_STACK_BYTES + 24 /* r13 relative to cfa (rsp) */),
517 "i"(RESERVED_C_STACK_BYTES + 32 /* r14 relative to cfa (rsp) */),
518 "i"(RESERVED_C_STACK_BYTES + 40 /* r15 relative to cfa (rsp) */),
519 "i"(RESERVED_C_STACK_BYTES + STG_RUN_STACK_FRAME_SIZE
520 /* rip relative to cfa */)
521
522 #if !defined(darwin_HOST_OS)
523 , "i"((RSP_DELTA & 127) | (128 * ((RSP_DELTA >> 7) > 0)))
524 /* signed LEB128-encoded delta from rsp - byte 1 */
525 #if (RSP_DELTA >> 7) > 0
526 , "i"(((RSP_DELTA >> 7) & 127) | (128 * ((RSP_DELTA >> 14) > 0)))
527 /* signed LEB128-encoded delta from rsp - byte 2 */
528 #endif
529
530 #if (RSP_DELTA >> 14) > 0
531 , "i"(((RSP_DELTA >> 14) & 127) | (128 * ((RSP_DELTA >> 21) > 0)))
532 /* signed LEB128-encoded delta from rsp - byte 3 */
533 #endif
534
535 #if (RSP_DELTA >> 21) > 0
536 , "i"(((RSP_DELTA >> 21) & 127) | (128 * ((RSP_DELTA >> 28) > 0)))
537 /* signed LEB128-encoded delta from rsp - byte 4 */
538 #endif
539 #undef RSP_DELTA
540
541 #endif /* !defined(darwin_HOST_OS) */
542
543 );
544 /*
545 * See Note [Stack Alignment on X86]
546 */
547 }
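/*
 * Illustration only (kept out of the build): the saved-register area that
 * the diagram in Note [Unwinding foreign exports on x86-64] labels
 * STG_RUN_STACK_FRAME_SIZE, written out as a struct in the order of the
 * stores above. The constant itself is defined in StgRun.h and may include
 * alignment padding; the Windows-only fields mirror the extra callee-saved
 * registers of the Win64 calling convention.
 */
#if 0
struct StgRunSavedRegs {
    StgWord rbx, rbp, r12, r13, r14, r15;
#if defined(mingw32_HOST_OS)
    StgWord rdi, rsi;                     /* extra GP callee-saves on Win64 */
    StgWord xmm6_15[10];                  /* low 64 bits of xmm6..xmm15 */
#endif
};
#endif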
548
549 #endif /* x86-64 */
550
551 /* -----------------------------------------------------------------------------
552 Sparc architecture
553
554 --
555 OLD COMMENT from GHC-3.02:
556
557 We want tailjumps to be calls, because `call xxx' is the only Sparc
558 branch that allows an arbitrary label as a target. (Gcc's ``goto
559 *target'' construct ends up loading the label into a register and
560 then jumping, at the cost of two extra instructions for the 32-bit
561 load.)
562
563 When entering the threaded world, we stash our return address in a
564 known location so that %i7 is available as an extra
565 callee-saves register. Of course, we have to restore this when
566 coming out of the threaded world.
567
568 I hate this god-forsaken architecture. Since the top of the
569 reserved stack space is used for globals and the bottom is reserved
570 for outgoing arguments, we have to stick our return address
571 somewhere in the middle. Currently, I'm allowing 100 extra
572 outgoing arguments beyond the first 6. --JSM
573
574 Updated info (GHC 4.06): we don't appear to use %i7 any more, so
575 I'm not sure whether we still need to save it. Incidentally, what
576 does the last paragraph above mean when it says "the top of the
577 stack is used for globals"? What globals? --SDM
578
579 Updated info (GHC 4.08.2): not saving %i7 any more (see below).
580 -------------------------------------------------------------------------- */
581
582 #if defined(sparc_HOST_ARCH)
583
584 StgRegTable *
585 StgRun(StgFunPtr f, StgRegTable *basereg) {
586
587 unsigned char space[RESERVED_C_STACK_BYTES];
588 #if 0
589 register void *i7 __asm__("%i7");
590 ((void **)(space))[100] = i7;
591 #endif
592 f();
593 __asm__ volatile (
594 ".align 4\n"
595 ".global " STG_RETURN "\n"
596 STG_RETURN ":"
597 : : "p" (space) : "l0","l1","l2","l3","l4","l5","l6","l7");
598 /* we tell the C compiler that l0-l7 are clobbered on return to
599 * StgReturn, otherwise it tries to use these to save eg. the
600 * address of space[100] across the call. The correct thing
601 * to do would be to save all the callee-saves regs, but we
602 * can't be bothered to do that.
603 *
604 * We also explicitly mark space as used since gcc eliminates it
605 * otherwise.
606 *
607 * The code that gcc generates for this little fragment is now
608 * terrible. We could do much better by coding it directly in
609 * assembler.
610 */
611 #if 0
612 /* updated 4.08.2: we don't save %i7 in the middle of the reserved
613 * space any more, since gcc tries to save its address across the
614 * call to f(); this gets clobbered in STG land and we end up
615 * dereferencing a bogus pointer in StgReturn.
616 */
617 __asm__ volatile ("ld %1,%0"
618 : "=r" (i7) : "m" (((void **)(space))[100]));
619 #endif
620 return (StgRegTable *)R1.i;
621 }
622
623 #endif
624
625 /* -----------------------------------------------------------------------------
626 PowerPC architecture
627
628 Everything is in assembler, so we don't have to deal with GCC...
629 -------------------------------------------------------------------------- */
630
631 #if defined(powerpc_HOST_ARCH)
632
633 #define STG_GLOBAL ".globl "
634
635 #define STG_HIDDEN ".hidden "
636
637 #if defined(aix_HOST_OS)
638
639 // implementation is in StgCRunAsm.S
640
641 #else
642
643 // This version is for PowerPC Linux.
644
645 static void GNUC3_ATTRIBUTE(used)
646 StgRunIsImplementedInAssembler(void)
647 {
648 __asm__ volatile (
649 "\t.globl StgRun\n"
650 "\t.hidden StgRun\n"
651 "\t.type StgRun,@function\n"
652 "StgRun:\n"
653 "\tmflr 0\n"
654 "\tstw 0,4(1)\n"
655 "\tmr 5,1\n"
656 "\tstwu 1,-%0(1)\n"
657 "\tstmw 13,-220(5)\n"
658 "\tstfd 14,-144(5)\n"
659 "\tstfd 15,-136(5)\n"
660 "\tstfd 16,-128(5)\n"
661 "\tstfd 17,-120(5)\n"
662 "\tstfd 18,-112(5)\n"
663 "\tstfd 19,-104(5)\n"
664 "\tstfd 20,-96(5)\n"
665 "\tstfd 21,-88(5)\n"
666 "\tstfd 22,-80(5)\n"
667 "\tstfd 23,-72(5)\n"
668 "\tstfd 24,-64(5)\n"
669 "\tstfd 25,-56(5)\n"
670 "\tstfd 26,-48(5)\n"
671 "\tstfd 27,-40(5)\n"
672 "\tstfd 28,-32(5)\n"
673 "\tstfd 29,-24(5)\n"
674 "\tstfd 30,-16(5)\n"
675 "\tstfd 31,-8(5)\n"
676 "\tmr 27,4\n" // BaseReg == r27
677 "\tmtctr 3\n"
678 "\tmr 12,3\n"
679 "\tbctr\n"
680 ".globl StgReturn\n"
681 "\t.type StgReturn,@function\n"
682 "StgReturn:\n"
683 "\tmr 3,14\n"
684 "\tla 5,%0(1)\n"
685 "\tlmw 13,-220(5)\n"
686 "\tlfd 14,-144(5)\n"
687 "\tlfd 15,-136(5)\n"
688 "\tlfd 16,-128(5)\n"
689 "\tlfd 17,-120(5)\n"
690 "\tlfd 18,-112(5)\n"
691 "\tlfd 19,-104(5)\n"
692 "\tlfd 20,-96(5)\n"
693 "\tlfd 21,-88(5)\n"
694 "\tlfd 22,-80(5)\n"
695 "\tlfd 23,-72(5)\n"
696 "\tlfd 24,-64(5)\n"
697 "\tlfd 25,-56(5)\n"
698 "\tlfd 26,-48(5)\n"
699 "\tlfd 27,-40(5)\n"
700 "\tlfd 28,-32(5)\n"
701 "\tlfd 29,-24(5)\n"
702 "\tlfd 30,-16(5)\n"
703 "\tlfd 31,-8(5)\n"
704 "\tmr 1,5\n"
705 "\tlwz 0,4(1)\n"
706 "\tmtlr 0\n"
707 "\tblr\n"
708 : : "i"(RESERVED_C_STACK_BYTES+224 /*stack frame size*/));
709 }
710 #endif
711
712 #endif
713
714 /* -----------------------------------------------------------------------------
715 PowerPC 64 architecture
716
717 Everything is in assembler, so we don't have to deal with GCC...
718 -------------------------------------------------------------------------- */
719
720 #if defined(powerpc64_HOST_ARCH)
721
722 static void GNUC3_ATTRIBUTE(used)
723 StgRunIsImplementedInAssembler(void)
724 {
725 // r0 volatile
726 // r1 stack pointer
727 // r2 toc - needs to be saved
728 // r3-r10 argument passing, volatile
729 // r11, r12 very volatile (not saved across cross-module calls)
730 // r13 thread local state (never modified, don't need to save)
731 // r14-r31 callee-save
732 __asm__ volatile (
733 ".section \".opd\",\"aw\"\n"
734 ".align 3\n"
735 ".globl StgRun\n"
736 ".hidden StgRun\n"
737 "StgRun:\n"
738 "\t.quad\t.StgRun,.TOC.@tocbase,0\n"
739 "\t.size StgRun,24\n"
740 ".globl StgReturn\n"
741 "StgReturn:\n"
742 "\t.quad\t.StgReturn,.TOC.@tocbase,0\n"
743 "\t.size StgReturn,24\n"
744 ".previous\n"
745 ".globl .StgRun\n"
746 ".type .StgRun,@function\n"
747 ".StgRun:\n"
748 "\tmflr 0\n"
749 "\tmr 5, 1\n"
750 "\tstd 0, 16(1)\n"
751 "\tstdu 1, -%0(1)\n"
752 "\tstd 2, -296(5)\n"
753 "\tstd 14, -288(5)\n"
754 "\tstd 15, -280(5)\n"
755 "\tstd 16, -272(5)\n"
756 "\tstd 17, -264(5)\n"
757 "\tstd 18, -256(5)\n"
758 "\tstd 19, -248(5)\n"
759 "\tstd 20, -240(5)\n"
760 "\tstd 21, -232(5)\n"
761 "\tstd 22, -224(5)\n"
762 "\tstd 23, -216(5)\n"
763 "\tstd 24, -208(5)\n"
764 "\tstd 25, -200(5)\n"
765 "\tstd 26, -192(5)\n"
766 "\tstd 27, -184(5)\n"
767 "\tstd 28, -176(5)\n"
768 "\tstd 29, -168(5)\n"
769 "\tstd 30, -160(5)\n"
770 "\tstd 31, -152(5)\n"
771 "\tstfd 14, -144(5)\n"
772 "\tstfd 15, -136(5)\n"
773 "\tstfd 16, -128(5)\n"
774 "\tstfd 17, -120(5)\n"
775 "\tstfd 18, -112(5)\n"
776 "\tstfd 19, -104(5)\n"
777 "\tstfd 20, -96(5)\n"
778 "\tstfd 21, -88(5)\n"
779 "\tstfd 22, -80(5)\n"
780 "\tstfd 23, -72(5)\n"
781 "\tstfd 24, -64(5)\n"
782 "\tstfd 25, -56(5)\n"
783 "\tstfd 26, -48(5)\n"
784 "\tstfd 27, -40(5)\n"
785 "\tstfd 28, -32(5)\n"
786 "\tstfd 29, -24(5)\n"
787 "\tstfd 30, -16(5)\n"
788 "\tstfd 31, -8(5)\n"
789 "\tmr 27, 4\n" // BaseReg == r27
790 "\tld 2, 8(3)\n"
791 "\tld 3, 0(3)\n"
792 "\tmtctr 3\n"
793 "\tbctr\n"
794 ".globl .StgReturn\n"
795 ".type .StgReturn,@function\n"
796 ".StgReturn:\n"
797 "\tmr 3,14\n"
798 "\tla 5, %0(1)\n" // load address == addi r5, r1, %0
799 "\tld 2, -296(5)\n"
800 "\tld 14, -288(5)\n"
801 "\tld 15, -280(5)\n"
802 "\tld 16, -272(5)\n"
803 "\tld 17, -264(5)\n"
804 "\tld 18, -256(5)\n"
805 "\tld 19, -248(5)\n"
806 "\tld 20, -240(5)\n"
807 "\tld 21, -232(5)\n"
808 "\tld 22, -224(5)\n"
809 "\tld 23, -216(5)\n"
810 "\tld 24, -208(5)\n"
811 "\tld 25, -200(5)\n"
812 "\tld 26, -192(5)\n"
813 "\tld 27, -184(5)\n"
814 "\tld 28, -176(5)\n"
815 "\tld 29, -168(5)\n"
816 "\tld 30, -160(5)\n"
817 "\tld 31, -152(5)\n"
818 "\tlfd 14, -144(5)\n"
819 "\tlfd 15, -136(5)\n"
820 "\tlfd 16, -128(5)\n"
821 "\tlfd 17, -120(5)\n"
822 "\tlfd 18, -112(5)\n"
823 "\tlfd 19, -104(5)\n"
824 "\tlfd 20, -96(5)\n"
825 "\tlfd 21, -88(5)\n"
826 "\tlfd 22, -80(5)\n"
827 "\tlfd 23, -72(5)\n"
828 "\tlfd 24, -64(5)\n"
829 "\tlfd 25, -56(5)\n"
830 "\tlfd 26, -48(5)\n"
831 "\tlfd 27, -40(5)\n"
832 "\tlfd 28, -32(5)\n"
833 "\tlfd 29, -24(5)\n"
834 "\tlfd 30, -16(5)\n"
835 "\tlfd 31, -8(5)\n"
836 "\tmr 1, 5\n"
837 "\tld 0, 16(1)\n"
838 "\tmtlr 0\n"
839 "\tblr\n"
840 : : "i"(RESERVED_C_STACK_BYTES+304 /*stack frame size*/));
841 }
842
843 #endif
844
845 #if defined(powerpc64le_HOST_ARCH)
846 /* -----------------------------------------------------------------------------
847 PowerPC 64 little endian architecture
848
849 Really everything is in assembler, so we don't have to deal with GCC...
850 -------------------------------------------------------------------------- */
851 #endif
852
853 /* -----------------------------------------------------------------------------
854 ARM architecture
855 -------------------------------------------------------------------------- */
856
857 #if defined(arm_HOST_ARCH)
858
859 #if defined(__thumb__)
860 #define THUMB_FUNC ".thumb\n\t.thumb_func\n\t"
861 #else
862 #define THUMB_FUNC
863 #endif
864
865 StgRegTable *
866 StgRun(StgFunPtr f, StgRegTable *basereg) {
867 StgRegTable * r;
868 __asm__ volatile (
869 /*
870 * save callee-saves registers on behalf of the STG code.
871 */
872 "stmfd sp!, {r4-r11, ip, lr}\n\t"
873 #if !defined(arm_HOST_ARCH_PRE_ARMv6)
874 "vstmdb sp!, {d8-d11}\n\t"
875 #endif
876 /*
877 * allocate some space for the Stg machine's temporary storage.
878 * Note: RESERVED_C_STACK_BYTES has to be a round number here or
879 * the assembler can't assemble it.
880 */
881 "sub sp, sp, %3\n\t"
882 /*
883 * Set BaseReg
884 */
885 "mov r4, %2\n\t"
886 /*
887 * Jump to function argument.
888 */
889 "bx %1\n\t"
890
891 ".globl " STG_RETURN "\n\t"
892 THUMB_FUNC
893 #if !defined(ios_HOST_OS)
894 ".type " STG_RETURN ", %%function\n"
895 #endif
896 STG_RETURN ":\n\t"
897 /*
898 * Free the space we allocated
899 */
900 "add sp, sp, %3\n\t"
901 /*
902 * Return the new register table, taking it from Stg's R1 (ARM's R7).
903 */
904 "mov %0, r7\n\t"
905 /*
906 * restore callee-saves registers.
907 */
908 #if !defined(arm_HOST_ARCH_PRE_ARMv6)
909 "vldmia sp!, {d8-d11}\n\t"
910 #endif
911 "ldmfd sp!, {r4-r11, ip, lr}\n\t"
912 : "=r" (r)
913 : "r" (f), "r" (basereg), "i" (RESERVED_C_STACK_BYTES)
914 #if !defined(__thumb__)
915 /* In ARM mode, r11/fp is the frame pointer and so we cannot mark
916 it as clobbered. If we do so, GCC complains with an error. */
917 : "%r4", "%r5", "%r6", "%r7", "%r8", "%r9", "%r10", "%ip", "%lr"
918 #else
919 /* In Thumb mode r7 is the frame pointer and so we cannot mark it
920 as clobbered. On the other hand we also mark as clobbered
921 those regs not used in Thumb mode. It is hard to judge if
922 this is needed, but Haskell code certainly uses them for
923 placing GHC's virtual registers; see
924 includes/stg/MachRegs.h. Please note that Haskell code is
925 compiled by GHC/LLVM into ARM code (not Thumb!), at least
926 as of February 2012. */
927 : "%r4", "%r5", "%r6", "%r8", "%r9", "%r10", "%11", "%ip", "%lr"
928 #endif
929 );
930 return r;
931 }
932 #endif
933
934 #if defined(aarch64_HOST_ARCH)
935
936 StgRegTable *
937 StgRun(StgFunPtr f, StgRegTable *basereg) {
938 StgRegTable * r;
939 __asm__ volatile (
940 /*
941 * Save callee-saves registers on behalf of the STG code.
942 * Floating point registers only need the bottom 64 bits preserved.
943 * We need to use the names x16, x17, x29 and x30 instead of ip0,
944 * ip1, fp and lr because one of clang or gcc doesn't understand
945 * the latter names.
946 */
947 "stp x29, x30, [sp, #-16]!\n\t"
948 "mov x29, sp\n\t"
949 "stp x16, x17, [sp, #-16]!\n\t"
950 "stp x19, x20, [sp, #-16]!\n\t"
951 "stp x21, x22, [sp, #-16]!\n\t"
952 "stp x23, x24, [sp, #-16]!\n\t"
953 "stp x25, x26, [sp, #-16]!\n\t"
954 "stp x27, x28, [sp, #-16]!\n\t"
955 "stp d8, d9, [sp, #-16]!\n\t"
956 "stp d10, d11, [sp, #-16]!\n\t"
957 "stp d12, d13, [sp, #-16]!\n\t"
958 "stp d14, d15, [sp, #-16]!\n\t"
959
960 /*
961 * allocate some space for the Stg machine's temporary storage.
962 * Note: RESERVED_C_STACK_BYTES has to be a round number here or
963 * the assembler can't assemble it.
964 */
965 "sub sp, sp, %3\n\t"
966 /*
967 * Set BaseReg
968 */
969 "mov x19, %2\n\t"
970 /*
971 * Jump to function argument.
972 */
973 "br %1\n\t"
974
975 ".globl " STG_RETURN "\n\t"
976 #if !defined(ios_HOST_OS)
977 ".type " STG_RETURN ", %%function\n"
978 #endif
979 STG_RETURN ":\n\t"
980 /*
981 * Free the space we allocated
982 */
983 "add sp, sp, %3\n\t"
984 /*
985 * Return the new register table, taking it from Stg's R1 (AArch64's x22).
986 */
987 "mov %0, x22\n\t"
988 /*
989 * restore callee-saves registers.
990 */
991
992 "ldp d14, d15, [sp], #16\n\t"
993 "ldp d12, d13, [sp], #16\n\t"
994 "ldp d10, d11, [sp], #16\n\t"
995 "ldp d8, d9, [sp], #16\n\t"
996 "ldp x27, x28, [sp], #16\n\t"
997 "ldp x25, x26, [sp], #16\n\t"
998 "ldp x23, x24, [sp], #16\n\t"
999 "ldp x21, x22, [sp], #16\n\t"
1000 "ldp x19, x20, [sp], #16\n\t"
1001 "ldp x16, x17, [sp], #16\n\t"
1002 "ldp x29, x30, [sp], #16\n\t"
1003
1004 : "=r" (r)
1005 : "r" (f), "r" (basereg), "i" (RESERVED_C_STACK_BYTES)
1006 : "%x19", "%x20", "%x21", "%x22", "%x23", "%x24", "%x25", "%x26", "%x27", "%x28",
1007 "%x16", "%x17", "%x30"
1008 );
1009 return r;
1010 }
1011
1012 #endif
1013
1014 #endif /* !USE_MINIINTERPRETER */