2e0ffb45b884a0bff8b42f017799e130198282a0
[libffi.git] / src / closures.c
1 /* -----------------------------------------------------------------------
2 closures.c - Copyright (c) 2007, 2009, 2010 Red Hat, Inc.
3 Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc
4 Copyright (c) 2011 Plausible Labs Cooperative, Inc.
5
6 Code to allocate and deallocate memory for closures.
7
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 ``Software''), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
15
16 The above copyright notice and this permission notice shall be included
17 in all copies or substantial portions of the Software.
18
19 THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
23 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24 WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 DEALINGS IN THE SOFTWARE.
27 ----------------------------------------------------------------------- */
28
29 #if defined __linux__ && !defined _GNU_SOURCE
30 #define _GNU_SOURCE 1
31 #endif
32
33 #include <fficonfig.h>
34 #include <ffi.h>
35 #include <ffi_common.h>
36
37 #if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE
38 # if __linux__ && !defined(__ANDROID__)
39 /* This macro indicates it may be forbidden to map anonymous memory
40 with both write and execute permission. Code compiled when this
41 option is defined will attempt to map such pages once, but if it
42 fails, it falls back to creating a temporary file in a writable and
43 executable filesystem and mapping pages from it into separate
44 locations in the virtual memory space, one location writable and
45 another executable. */
46 # define FFI_MMAP_EXEC_WRIT 1
47 # define HAVE_MNTENT 1
48 # endif
49 # if defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)
50 /* Windows systems may have Data Execution Protection (DEP) enabled,
51 which requires the use of VirtualMalloc/VirtualFree to alloc/free
52 executable memory. */
53 # define FFI_MMAP_EXEC_WRIT 1
54 # endif
55 #endif
56
57 #if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX
58 # ifdef __linux__
59 /* When defined to 1 check for SELinux and if SELinux is active,
60 don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that
61 might cause audit messages. */
62 # define FFI_MMAP_EXEC_SELINUX 1
63 # endif
64 #endif
65
66 #if FFI_CLOSURES
67
68 #if FFI_EXEC_TRAMPOLINE_TABLE
69
70 #ifdef __MACH__
71
72 #include <mach/mach.h>
73 #include <pthread.h>
74 #include <stdio.h>
75 #include <stdlib.h>
76
/* The page of trampoline code, provided by the per-target assembly; it is
   remapped to sit directly after each table's config page below.  */
extern void *ffi_closure_trampoline_table_page;

typedef struct ffi_trampoline_table ffi_trampoline_table;
typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry;

/* One trampoline table: a config page followed immediately by a shared
   mapping of the trampoline code page, plus free-list bookkeeping.  */
struct ffi_trampoline_table
{
  /* contiguous writable and executable pages */
  vm_address_t config_page;      /* writable page holding per-closure data */
  vm_address_t trampoline_page;  /* executable remap of the template page */

  /* free list tracking */
  uint16_t free_count;                        /* number of unused slots */
  ffi_trampoline_table_entry *free_list;      /* head of the free list */
  ffi_trampoline_table_entry *free_list_pool; /* backing array of entries */

  /* doubly-linked list of live tables; head is ffi_trampoline_tables */
  ffi_trampoline_table *prev;
  ffi_trampoline_table *next;
};

/* Bookkeeping for a single trampoline slot within a table.  */
struct ffi_trampoline_table_entry
{
  void *(*trampoline) ();           /* executable address of this slot */
  ffi_trampoline_table_entry *next; /* next free entry while on the free list */
};

/* Total number of trampolines that fit in one trampoline table */
#define FFI_TRAMPOLINE_COUNT (PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE)
105
106 static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER;
107 static ffi_trampoline_table *ffi_trampoline_tables = NULL;
108
/* Allocate a new trampoline table: a config page immediately followed by a
   shared remap of the trampoline code page, plus a free list covering every
   trampoline slot.  Returns NULL on failure.  */
static ffi_trampoline_table *
ffi_trampoline_table_alloc ()
{
  ffi_trampoline_table *table = NULL;

  /* Loop until we can allocate two contiguous pages */
  while (table == NULL)
    {
      vm_address_t config_page = 0x0;
      kern_return_t kt;

      /* Try to allocate two pages */
      kt = vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2,
                        VM_FLAGS_ANYWHERE);
      if (kt != KERN_SUCCESS)
        {
          fprintf (stderr, "vm_allocate() failure: %d at %s:%d\n", kt,
                   __FILE__, __LINE__);
          break;
        }

      /* Now drop the second half of the allocation to make room for the
         trampoline table */
      vm_address_t trampoline_page = config_page + PAGE_MAX_SIZE;
      kt = vm_deallocate (mach_task_self (), trampoline_page, PAGE_MAX_SIZE);
      if (kt != KERN_SUCCESS)
        {
          fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
                   __FILE__, __LINE__);
          /* BUGFIX: release the half we still own instead of leaking it.  */
          vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE);
          break;
        }

      /* Remap the trampoline table to directly follow the config page */
      vm_prot_t cur_prot;
      vm_prot_t max_prot;

      vm_address_t trampoline_page_template =
        (vm_address_t) &ffi_closure_trampoline_table_page;
#ifdef __arm__
      /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM
         archs; clear the low bit to get the real page address.  */
      trampoline_page_template &= ~1UL;
#endif

      kt = vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0,
                     FALSE, mach_task_self (), trampoline_page_template,
                     FALSE, &cur_prot, &max_prot, VM_INHERIT_SHARE);

      /* If we lost access to the destination trampoline page, drop our
         config allocation mapping and retry */
      if (kt != KERN_SUCCESS)
        {
          /* Log unexpected failures */
          if (kt != KERN_NO_SPACE)
            fprintf (stderr, "vm_remap() failure: %d at %s:%d\n", kt,
                     __FILE__, __LINE__);

          /* BUGFIX: was PAGE_SIZE; the config mapping is PAGE_MAX_SIZE, so
             deallocating less leaked address space on every retry when
             PAGE_MAX_SIZE > PAGE_SIZE.  */
          vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE);
          continue;
        }

      /* We have valid trampoline and config pages */
      table = calloc (1, sizeof (ffi_trampoline_table));
      if (table == NULL)
        {
          /* BUGFIX: the original dereferenced a NULL table on OOM.  */
          vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE);
          vm_deallocate (mach_task_self (), trampoline_page, PAGE_MAX_SIZE);
          break;
        }
      table->free_count = FFI_TRAMPOLINE_COUNT;
      table->config_page = config_page;
      table->trampoline_page = trampoline_page;

      /* Create and initialize the free list */
      table->free_list_pool =
        calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry));
      if (table->free_list_pool == NULL)
        {
          /* BUGFIX: OOM here previously crashed in the loop below.  */
          vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE);
          vm_deallocate (mach_task_self (), trampoline_page, PAGE_MAX_SIZE);
          free (table);
          table = NULL;
          break;
        }

      uint16_t i;
      for (i = 0; i < table->free_count; i++)
        {
          ffi_trampoline_table_entry *entry = &table->free_list_pool[i];
          entry->trampoline =
            (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE));

          /* Chain each entry to its successor; the last entry's next stays
             NULL courtesy of calloc.  */
          if (i < table->free_count - 1)
            entry->next = &table->free_list_pool[i + 1];
        }

      table->free_list = table->free_list_pool;
    }

  return table;
}
196
/* Allocate a closure of SIZE bytes plus a trampoline table entry.  Returns
   the writable closure, storing the executable trampoline address in *CODE;
   NULL on allocation failure or when CODE is NULL.  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  /* Robustness (and consistency with the other backends): refuse a NULL
     CODE out-pointer instead of crashing on the final store.  */
  if (code == NULL)
    return NULL;

  /* Create the closure */
  ffi_closure *closure = malloc (size);
  if (closure == NULL)
    return NULL;

  pthread_mutex_lock (&ffi_trampoline_lock);

  /* Check for an active trampoline table with available entries. */
  ffi_trampoline_table *table = ffi_trampoline_tables;
  if (table == NULL || table->free_list == NULL)
    {
      table = ffi_trampoline_table_alloc ();
      if (table == NULL)
        {
          /* BUGFIX: release the lock before bailing out; the original
             returned with the mutex still held, deadlocking every later
             closure allocation or free.  */
          pthread_mutex_unlock (&ffi_trampoline_lock);
          free (closure);
          return NULL;
        }

      /* Insert the new table at the top of the list */
      table->next = ffi_trampoline_tables;
      if (table->next != NULL)
        table->next->prev = table;

      ffi_trampoline_tables = table;
    }

  /* Claim the free entry (table is the list head at this point) */
  ffi_trampoline_table_entry *entry = table->free_list;
  table->free_list = entry->next;
  table->free_count--;
  entry->next = NULL;

  pthread_mutex_unlock (&ffi_trampoline_lock);

  /* Initialize the return values */
  *code = entry->trampoline;
  closure->trampoline_table = table;
  closure->trampoline_table_entry = entry;

  return closure;
}
241
/* Return PTR's trampoline table entry to its table's free list and free the
   closure itself.  A fully-free table that is not the list head has its
   pages and bookkeeping released; otherwise the table is moved to the head
   of the list so the next allocation finds its free entries quickly.  */
void
ffi_closure_free (void *ptr)
{
  ffi_closure *closure = ptr;

  /* Robustness: mirror free(NULL) semantics.  */
  if (closure == NULL)
    return;

  pthread_mutex_lock (&ffi_trampoline_lock);

  /* Fetch the table and entry references */
  ffi_trampoline_table *table = closure->trampoline_table;
  ffi_trampoline_table_entry *entry = closure->trampoline_table_entry;

  /* Return the entry to the free list */
  entry->next = table->free_list;
  table->free_list = entry;
  table->free_count++;

  /* If all trampolines within this table are free, and at least one other
     table exists, deallocate the table */
  if (table->free_count == FFI_TRAMPOLINE_COUNT
      && ffi_trampoline_tables != table)
    {
      /* Remove from the list */
      if (table->prev != NULL)
        table->prev->next = table->next;

      if (table->next != NULL)
        table->next->prev = table->prev;

      /* Deallocate pages.  BUGFIX: the mappings are PAGE_MAX_SIZE (see
         ffi_trampoline_table_alloc); deallocating only PAGE_SIZE leaked the
         remainder whenever PAGE_MAX_SIZE > PAGE_SIZE.  */
      kern_return_t kt;
      kt = vm_deallocate (mach_task_self (), table->config_page,
                          PAGE_MAX_SIZE);
      if (kt != KERN_SUCCESS)
        fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
                 __FILE__, __LINE__);

      kt = vm_deallocate (mach_task_self (), table->trampoline_page,
                          PAGE_MAX_SIZE);
      if (kt != KERN_SUCCESS)
        fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
                 __FILE__, __LINE__);

      /* Deallocate free list */
      free (table->free_list_pool);
      free (table);
    }
  else if (ffi_trampoline_tables != table)
    {
      /* Otherwise, bump this table to the top of the list.
         BUGFIX: unlink the table from its current position first; the
         original left its old neighbors still pointing at it, corrupting
         the doubly-linked list.  */
      if (table->prev != NULL)
        table->prev->next = table->next;
      if (table->next != NULL)
        table->next->prev = table->prev;

      table->prev = NULL;
      table->next = ffi_trampoline_tables;
      if (ffi_trampoline_tables != NULL)
        ffi_trampoline_tables->prev = table;

      ffi_trampoline_tables = table;
    }

  pthread_mutex_unlock (&ffi_trampoline_lock);

  /* Free the closure */
  free (closure);
}
303
304 #endif
305
306 // Per-target implementation; It's unclear what can reasonable be shared between two OS/architecture implementations.
307
308 #elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */
309
310 #define USE_LOCKS 1
311 #define USE_DL_PREFIX 1
312 #ifdef __GNUC__
313 #ifndef USE_BUILTIN_FFS
314 #define USE_BUILTIN_FFS 1
315 #endif
316 #endif
317
318 /* We need to use mmap, not sbrk. */
319 #define HAVE_MORECORE 0
320
321 /* We could, in theory, support mremap, but it wouldn't buy us anything. */
322 #define HAVE_MREMAP 0
323
324 /* We have no use for this, so save some code and data. */
325 #define NO_MALLINFO 1
326
327 /* We need all allocations to be in regular segments, otherwise we
328 lose track of the corresponding code address. */
329 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
330
331 /* Don't allocate more than a page unless needed. */
332 #define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize)
333
334 #include <sys/types.h>
335 #include <sys/stat.h>
336 #include <fcntl.h>
337 #include <errno.h>
338 #ifndef _MSC_VER
339 #include <unistd.h>
340 #endif
341 #include <string.h>
342 #include <stdio.h>
343 #if !defined(X86_WIN32) && !defined(X86_WIN64)
344 #ifdef HAVE_MNTENT
345 #include <mntent.h>
346 #endif /* HAVE_MNTENT */
347 #include <sys/param.h>
348 #include <pthread.h>
349
350 /* We don't want sys/mman.h to be included after we redefine mmap and
351 dlmunmap. */
352 #include <sys/mman.h>
353 #define LACKS_SYS_MMAN_H 1
354
355 #if FFI_MMAP_EXEC_SELINUX
356 #include <sys/statfs.h>
357 #include <stdlib.h>
358
359 static int selinux_enabled = -1;
360
/* Return 1 if SELinux appears active on this system, else 0.  Detection:
   either /selinux is a mounted selinuxfs (filesystem magic 0xf97cff8c), or
   an selinuxfs entry appears in /proc/mounts.  */
static int
selinux_enabled_check (void)
{
  struct statfs sfs;
  FILE *mounts;
  char *line = NULL;
  size_t cap = 0;
  int enabled = 0;

  if (statfs ("/selinux", &sfs) >= 0
      && (unsigned int) sfs.f_type == 0xf97cff8cU)
    return 1;

  mounts = fopen ("/proc/mounts", "r");
  if (mounts == NULL)
    return 0;

  while (getline (&line, &cap, mounts) >= 0)
    {
      /* The filesystem type is the third whitespace-separated field.  */
      char *field = strchr (line, ' ');
      if (field == NULL)
        break;
      field = strchr (field + 1, ' ');
      if (field == NULL)
        break;
      if (strncmp (field + 1, "selinuxfs ", 10) == 0)
        {
          enabled = 1;
          break;
        }
    }

  free (line);
  fclose (mounts);
  return enabled;
}
394
395 #define is_selinux_enabled() (selinux_enabled >= 0 ? selinux_enabled \
396 : (selinux_enabled = selinux_enabled_check ()))
397
398 #else
399
400 #define is_selinux_enabled() 0
401
402 #endif /* !FFI_MMAP_EXEC_SELINUX */
403
404 /* On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. */
405 #ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX
406 #include <stdlib.h>
407
408 static int emutramp_enabled = -1;
409
/* Return 1 if PaX emulated trampolines (EMUTRAMP) are enabled for the
   current process, judging from the "PaX:" flags line of
   /proc/self/status; 0 otherwise or if the file cannot be read.  */
static int
emutramp_enabled_check (void)
{
  FILE *status;
  char *line = NULL;
  size_t cap = 0;
  int enabled = 0;

  status = fopen ("/proc/self/status", "r");
  if (status == NULL)
    return 0;

  while (getline (&line, &cap, status) != -1)
    {
      if (strncmp (line, "PaX:", 4) != 0)
        continue;

      /* The second character of the flags word is 'E' when EMUTRAMP is
         active (e.g. "PaX: PeMRs").  */
      char flag;
      if (sscanf (line, "%*s %*c%c", &flag) == 1)
        enabled = (flag == 'E');
      break;
    }

  free (line);
  fclose (status);
  return enabled;
}
434
435 #define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \
436 : (emutramp_enabled = emutramp_enabled_check ()))
437 #endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
438
439 #elif defined (__CYGWIN__) || defined(__INTERIX)
440
441 #include <sys/mman.h>
442
443 /* Cygwin is Linux-like, but not quite that Linux-like. */
444 #define is_selinux_enabled() 0
445
446 #endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */
447
448 #ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX
449 #define is_emutramp_enabled() 0
450 #endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
451
452 /* Declare all functions defined in dlmalloc.c as static. */
453 static void *dlmalloc(size_t);
454 static void dlfree(void*);
455 static void *dlcalloc(size_t, size_t) MAYBE_UNUSED;
456 static void *dlrealloc(void *, size_t) MAYBE_UNUSED;
457 static void *dlmemalign(size_t, size_t) MAYBE_UNUSED;
458 static void *dlvalloc(size_t) MAYBE_UNUSED;
459 static int dlmallopt(int, int) MAYBE_UNUSED;
460 static size_t dlmalloc_footprint(void) MAYBE_UNUSED;
461 static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED;
462 static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED;
463 static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED;
464 static void *dlpvalloc(size_t) MAYBE_UNUSED;
465 static int dlmalloc_trim(size_t) MAYBE_UNUSED;
466 static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED;
467 static void dlmalloc_stats(void) MAYBE_UNUSED;
468
469 #if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
470 /* Use these for mmap and munmap within dlmalloc.c. */
471 static void *dlmmap(void *, size_t, int, int, int, off_t);
472 static int dlmunmap(void *, size_t);
473 #endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
474
475 #define mmap dlmmap
476 #define munmap dlmunmap
477
478 #include "dlmalloc.c"
479
480 #undef mmap
481 #undef munmap
482
483 #if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
484
485 /* A mutex used to synchronize access to *exec* variables in this file. */
486 static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER;
487
488 /* A file descriptor of a temporary file from which we'll map
489 executable pages. */
490 static int execfd = -1;
491
492 /* The amount of space already allocated from the temporary file. */
493 static size_t execsize = 0;
494
/* Create a temporary file from template NAME (modified in place), unlink it
   immediately so it never persists, and return the open descriptor, or -1
   on failure.  FLAGS may request O_CLOEXEC.  */
static int
open_temp_exec_file_name (char *name, int flags)
{
  int fd;

#ifdef HAVE_MKOSTEMP
  fd = mkostemp (name, flags);
#else
  fd = mkstemp (name);
  /* BUGFIX: mkstemp ignores FLAGS, so the O_CLOEXEC request was silently
     dropped and the descriptor leaked across exec.  Apply the close-on-exec
     flag after the fact.  */
#ifdef O_CLOEXEC
  if (fd != -1 && (flags & O_CLOEXEC))
    fcntl (fd, F_SETFD, FD_CLOEXEC);
#endif
#endif

  if (fd != -1)
    unlink (name);

  return fd;
}
512
/* Open a temporary file in the named directory DIR.  Prefers the Linux
   O_TMPFILE mechanism (an unnamed file that never appears in the
   directory), falling back to a mkstemp-style named file that is unlinked
   right after creation.  Returns the descriptor or -1.  */
static int
open_temp_exec_file_dir (const char *dir)
{
  static const char suffix[] = "/ffiXXXXXX";
  int lendir, flags;
  char *tempname;
#ifdef O_TMPFILE
  int fd;
#endif

#ifdef O_CLOEXEC
  flags = O_CLOEXEC;
#else
  flags = 0;
#endif

#ifdef O_TMPFILE
  fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700);
  /* If the running system does not support the O_TMPFILE flag then retry without it. */
  /* EINVAL/EISDIR/EOPNOTSUPP are the errors old kernels report for an
     unknown O_TMPFILE bit; any other failure is returned as-is.  */
  if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) {
    return fd;
  } else {
    errno = 0;
  }
#endif

  lendir = strlen (dir);
  /* NOTE(review): alloca cannot return NULL, so the check below is
     effectively dead — kept as-is since it is harmless.  */
  tempname = __builtin_alloca (lendir + sizeof (suffix));

  if (!tempname)
    return -1;

  /* Build "<dir>/ffiXXXXXX"; sizeof (suffix) copies the NUL too.  */
  memcpy (tempname, dir, lendir);
  memcpy (tempname + lendir, suffix, sizeof (suffix));

  return open_temp_exec_file_name (tempname, flags);
}
551
/* Open a temporary file in the directory named by environment variable
   ENVVAR; return -1 if the variable is unset.  */
static int
open_temp_exec_file_env (const char *envvar)
{
  const char *dir = getenv (envvar);

  return dir ? open_temp_exec_file_dir (dir) : -1;
}
564
#ifdef HAVE_MNTENT
/* Open a temporary file in an executable and writable mount point listed in
   the MOUNTS table (e.g. /etc/mtab).  Successive calls with the same MOUNTS
   argument resume scanning where the previous call stopped; passing NULL
   closes the table.  Returns a descriptor or -1 when exhausted.  */
static int
open_temp_exec_file_mnt (const char *mounts)
{
  static const char *last_mounts;
  static FILE *last_mntent;

  if (mounts != last_mounts)
    {
      /* A different table was requested: drop any open handle and (re)open
         the new one.  */
      if (last_mntent)
        endmntent (last_mntent);

      last_mounts = mounts;
      last_mntent = mounts ? setmntent (mounts, "r") : NULL;
    }

  if (!last_mntent)
    return -1;

  for (;;)
    {
      struct mntent mnt;
      char buf[MAXPATHLEN * 3];
      int fd;

      if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)) == NULL)
        return -1;

      /* Skip read-only and noexec mounts, and any we cannot write to.  */
      if (hasmntopt (&mnt, "ro")
          || hasmntopt (&mnt, "noexec")
          || access (mnt.mnt_dir, W_OK))
        continue;

      fd = open_temp_exec_file_dir (mnt.mnt_dir);
      if (fd != -1)
        return fd;
    }
}
#endif /* HAVE_MNTENT */
613
/* Instructions to look for a location to hold a temporary file that
   can be mapped in for execution.  Entries are tried in order; a nonzero
   REPEAT marks a multi-call searcher (the mount-table scans) that is
   re-invoked until it reports failure, while single-shot entries are tried
   once each.  */
static struct
{
  /* Searcher; receives ARG (a directory, environment variable name, or
     mounts file path, depending on the function).  */
  int (*func)(const char *);
  const char *arg;
  int repeat;
} open_temp_exec_file_opts[] = {
  { open_temp_exec_file_env, "TMPDIR", 0 },
  { open_temp_exec_file_dir, "/tmp", 0 },
  { open_temp_exec_file_dir, "/var/tmp", 0 },
  { open_temp_exec_file_dir, "/dev/shm", 0 },
  { open_temp_exec_file_env, "HOME", 0 },
#ifdef HAVE_MNTENT
  { open_temp_exec_file_mnt, "/etc/mtab", 1 },
  { open_temp_exec_file_mnt, "/proc/mounts", 1 },
#endif /* HAVE_MNTENT */
};
632
633 /* Current index into open_temp_exec_file_opts. */
634 static int open_temp_exec_file_opts_idx = 0;
635
636 /* Reset a current multi-call func, then advances to the next entry.
637 If we're at the last, go back to the first and return nonzero,
638 otherwise return zero. */
639 static int
640 open_temp_exec_file_opts_next (void)
641 {
642 if (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
643 open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func (NULL);
644
645 open_temp_exec_file_opts_idx++;
646 if (open_temp_exec_file_opts_idx
647 == (sizeof (open_temp_exec_file_opts)
648 / sizeof (*open_temp_exec_file_opts)))
649 {
650 open_temp_exec_file_opts_idx = 0;
651 return 1;
652 }
653
654 return 0;
655 }
656
657 /* Return a file descriptor of a temporary zero-sized file in a
658 writable and executable filesystem. */
659 static int
660 open_temp_exec_file (void)
661 {
662 int fd;
663
664 do
665 {
666 fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func
667 (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg);
668
669 if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat
670 || fd == -1)
671 {
672 if (open_temp_exec_file_opts_next ())
673 break;
674 }
675 }
676 while (fd == -1);
677
678 return fd;
679 }
680
/* Map in a chunk of memory from the temporary exec file into separate
   locations in the virtual memory address space, one writable and one
   executable.  Returns the address of the writable portion, after
   storing an offset to the corresponding executable portion at the
   last word of the requested chunk.  Caller must hold
   open_temp_exec_file_mutex (see dlmmap).  */
static void *
dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset)
{
  void *ptr;

  if (execfd == -1)
    {
      /* First use: restart the search for a usable filesystem from the top
         of the options table.  */
      open_temp_exec_file_opts_idx = 0;
    retry_open:
      execfd = open_temp_exec_file ();
      if (execfd == -1)
        return MFAIL;
    }

  /* The incoming OFFSET is ignored; new chunks are always carved from the
     current end of the temporary file.  */
  offset = execsize;

  /* Grow the file to cover the new chunk.  */
  if (ftruncate (execfd, offset + length))
    return MFAIL;

  /* The chunk must be shared file-backed memory, not anonymous/private, so
     that the two views below alias the same bytes.  */
  flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS);
  flags |= MAP_SHARED;

  /* Executable (and non-writable) view of the chunk.  */
  ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC,
              flags, execfd, offset);
  if (ptr == MFAIL)
    {
      if (!offset)
        {
          /* Nothing has been mapped from this file yet; it may sit on a
             noexec filesystem, so close it and try the next candidate
             location.  */
          close (execfd);
          goto retry_open;
        }
      /* Undo the file growth before failing.  */
      ftruncate (execfd, offset);
      return MFAIL;
    }
  else if (!offset
           && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
    /* NOTE(review): on the first successful map from a repeatable searcher
       this resets it and advances the index — presumably so a later
       re-open rescans from a clean state; confirm intent before changing.  */
    open_temp_exec_file_opts_next ();

  /* Writable view of the same file region.  */
  start = mmap (start, length, prot, flags, execfd, offset);

  if (start == MFAIL)
    {
      munmap (ptr, length);
      ftruncate (execfd, offset);
      return start;
    }

  /* Record the writable->executable displacement in the chunk itself (the
     mmap_exec_offset slot provided by the patched dlmalloc).  */
  mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start;

  execsize += length;

  return start;
}
739
/* Map in a writable and executable chunk of memory if possible.
   Failing that, fall back to dlmmap_locked. */
static void *
dlmmap (void *start, size_t length, int prot,
        int flags, int fd, off_t offset)
{
  void *ptr;

  /* dlmalloc only ever requests fresh anonymous read/write mappings of
     whole-page size; anything else would mean an unsupported dlmalloc
     configuration.  */
  assert (start == NULL && length % malloc_getpagesize == 0
          && prot == (PROT_READ | PROT_WRITE)
          && flags == (MAP_PRIVATE | MAP_ANONYMOUS)
          && fd == -1 && offset == 0);

  if (execfd == -1 && is_emutramp_enabled ())
    {
      /* PaX EMUTRAMP emulates trampoline execution for us, so plain
         writable (non-executable) pages suffice.  */
      ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset);
      return ptr;
    }

  if (execfd == -1 && !is_selinux_enabled ())
    {
      /* Optimistically try a single writable+executable anonymous mapping.
         Skipped under SELinux, where the mere attempt could raise audit
         messages.  */
      ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset);

      if (ptr != MFAIL || (errno != EPERM && errno != EACCES))
        /* Cool, no need to mess with separate segments. */
        return ptr;

      /* If MREMAP_DUP is ever introduced and implemented, try mmap
         with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with
         MREMAP_DUP and prot at this point. */
    }

  if (execsize == 0 || execfd == -1)
    {
      /* First double-mapped allocation: serialize the temporary-file
         search/creation performed inside dlmmap_locked.  */
      pthread_mutex_lock (&open_temp_exec_file_mutex);
      ptr = dlmmap_locked (start, length, prot, flags, offset);
      pthread_mutex_unlock (&open_temp_exec_file_mutex);

      return ptr;
    }

  /* NOTE(review): this path calls dlmmap_locked without taking
     open_temp_exec_file_mutex — presumably safe because dlmalloc
     (USE_LOCKS) already serializes its calls into dlmmap; confirm before
     invoking from any other context.  */
  return dlmmap_locked (start, length, prot, flags, offset);
}
783
784 /* Release memory at the given address, as well as the corresponding
785 executable page if it's separate. */
786 static int
787 dlmunmap (void *start, size_t length)
788 {
789 /* We don't bother decreasing execsize or truncating the file, since
790 we can't quite tell whether we're unmapping the end of the file.
791 We don't expect frequent deallocation anyway. If we did, we
792 could locate pages in the file by writing to the pages being
793 deallocated and checking that the file contents change.
794 Yuck. */
795 msegmentptr seg = segment_holding (gm, start);
796 void *code;
797
798 if (seg && (code = add_segment_exec_offset (start, seg)) != start)
799 {
800 int ret = munmap (code, length);
801 if (ret)
802 return ret;
803 }
804
805 return munmap (start, length);
806 }
807
#if FFI_CLOSURE_FREE_CODE
/* Return segment holding given code address. */
static msegmentptr
segment_holding_code (mstate m, char* addr)
{
  msegmentptr sp;

  /* Walk the segment list, comparing against each segment's executable
     alias rather than its writable base.  */
  for (sp = &m->seg; sp != 0; sp = sp->next)
    {
      char *code_base = add_segment_exec_offset (sp->base, sp);
      if (addr >= code_base && addr < code_base + sp->size)
        return sp;
    }

  return 0;
}
#endif
823
824 #endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
825
826 /* Allocate a chunk of memory with the given size. Returns a pointer
827 to the writable address, and sets *CODE to the executable
828 corresponding virtual address. */
829 void *
830 ffi_closure_alloc (size_t size, void **code)
831 {
832 void *ptr;
833
834 if (!code)
835 return NULL;
836
837 ptr = dlmalloc (size);
838
839 if (ptr)
840 {
841 msegmentptr seg = segment_holding (gm, ptr);
842
843 *code = add_segment_exec_offset (ptr, seg);
844 }
845
846 return ptr;
847 }
848
/* Release a chunk of memory allocated with ffi_closure_alloc.  If
   FFI_CLOSURE_FREE_CODE is nonzero, the given address can be the
   writable or the executable address given.  Otherwise, only the
   writable address can be provided here. */
void
ffi_closure_free (void *ptr)
{
#if FFI_CLOSURE_FREE_CODE
  /* Map an executable address back to its writable alias, if any.  */
  msegmentptr seg = segment_holding_code (gm, ptr);

  if (seg != NULL)
    ptr = sub_segment_exec_offset (ptr, seg);
#endif

  dlfree (ptr);
}
865
866 # else /* ! FFI_MMAP_EXEC_WRIT */
867
868 /* On many systems, memory returned by malloc is writable and
869 executable, so just use it. */
870
871 #include <stdlib.h>
872
/* Allocate SIZE bytes for a closure; on these systems malloc'd memory is
   already executable, so the code address and the writable address
   coincide.  Stores the address in *CODE and returns it (NULL on failure
   or when CODE is NULL).  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  void *ptr;

  if (code == NULL)
    return NULL;

  ptr = malloc (size);
  *code = ptr;
  return ptr;
}
881
/* Release a closure obtained from ffi_closure_alloc; the memory came
   straight from malloc, so hand it back.  */
void
ffi_closure_free (void *ptr)
{
  free (ptr);
}
887
888 # endif /* ! FFI_MMAP_EXEC_WRIT */
889 #endif /* FFI_CLOSURES */