Merge pull request #238 from KubaKaszycki/master
[libffi.git] / src / closures.c
1 /* -----------------------------------------------------------------------
2 closures.c - Copyright (c) 2007, 2009, 2010 Red Hat, Inc.
3 Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc
4 Copyright (c) 2011 Plausible Labs Cooperative, Inc.
5
6 Code to allocate and deallocate memory for closures.
7
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 ``Software''), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
15
16 The above copyright notice and this permission notice shall be included
17 in all copies or substantial portions of the Software.
18
19 THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
23 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24 WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 DEALINGS IN THE SOFTWARE.
27 ----------------------------------------------------------------------- */
28
29 #if defined __linux__ && !defined _GNU_SOURCE
30 #define _GNU_SOURCE 1
31 #endif
32
33 #include <fficonfig.h>
34 #include <ffi.h>
35 #include <ffi_common.h>
36
37 #if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE
38 # if __linux__ && !defined(__ANDROID__)
39 /* This macro indicates it may be forbidden to map anonymous memory
40 with both write and execute permission. Code compiled when this
41 option is defined will attempt to map such pages once, but if it
42 fails, it falls back to creating a temporary file in a writable and
43 executable filesystem and mapping pages from it into separate
44 locations in the virtual memory space, one location writable and
45 another executable. */
46 # define FFI_MMAP_EXEC_WRIT 1
47 # define HAVE_MNTENT 1
48 # endif
49 # if defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)
/* Windows systems may have Data Execution Prevention (DEP) enabled,
   which requires the use of VirtualAlloc/VirtualFree to allocate and
   free executable memory. */
53 # define FFI_MMAP_EXEC_WRIT 1
54 # endif
55 #endif
56
57 #if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX
58 # ifdef __linux__
59 /* When defined to 1 check for SELinux and if SELinux is active,
60 don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that
61 might cause audit messages. */
62 # define FFI_MMAP_EXEC_SELINUX 1
63 # endif
64 #endif
65
66 #if FFI_CLOSURES
67
68 #if FFI_EXEC_TRAMPOLINE_TABLE
69
70 #ifdef __MACH__
71
72 #include <mach/mach.h>
73 #include <pthread.h>
74 #include <stdio.h>
75 #include <stdlib.h>
76
77 extern void *ffi_closure_trampoline_table_page;
78
79 typedef struct ffi_trampoline_table ffi_trampoline_table;
80 typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry;
81
/* A table of closure trampolines backed by two adjacent VM pages: a
   writable config page immediately followed by an executable page
   remapped from the trampoline code template (see
   ffi_trampoline_table_alloc).  */
struct ffi_trampoline_table
{
  /* contiguous writable and executable pages */
  vm_address_t config_page;      /* writable page */
  vm_address_t trampoline_page;  /* executable page at config_page + PAGE_MAX_SIZE */

  /* free list tracking */
  uint16_t free_count;                          /* entries currently on free_list */
  ffi_trampoline_table_entry *free_list;        /* head of the free-entry list */
  ffi_trampoline_table_entry *free_list_pool;   /* backing storage for all entries */

  /* links in the global ffi_trampoline_tables list */
  ffi_trampoline_table *prev;
  ffi_trampoline_table *next;
};
96
/* One trampoline slot plus its free-list link.  */
struct ffi_trampoline_table_entry
{
  void *(*trampoline) ();            /* executable address of this trampoline */
  ffi_trampoline_table_entry *next;  /* next free entry; NULL while in use */
};
102
103 /* Total number of trampolines that fit in one trampoline table */
104 #define FFI_TRAMPOLINE_COUNT (PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE)
105
106 static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER;
107 static ffi_trampoline_table *ffi_trampoline_tables = NULL;
108
109 static ffi_trampoline_table *
110 ffi_trampoline_table_alloc ()
111 {
112 ffi_trampoline_table *table = NULL;
113
114 /* Loop until we can allocate two contiguous pages */
115 while (table == NULL)
116 {
117 vm_address_t config_page = 0x0;
118 kern_return_t kt;
119
120 /* Try to allocate two pages */
121 kt =
122 vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2,
123 VM_FLAGS_ANYWHERE);
124 if (kt != KERN_SUCCESS)
125 {
126 fprintf (stderr, "vm_allocate() failure: %d at %s:%d\n", kt,
127 __FILE__, __LINE__);
128 break;
129 }
130
131 /* Now drop the second half of the allocation to make room for the trampoline table */
132 vm_address_t trampoline_page = config_page + PAGE_MAX_SIZE;
133 kt = vm_deallocate (mach_task_self (), trampoline_page, PAGE_MAX_SIZE);
134 if (kt != KERN_SUCCESS)
135 {
136 fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
137 __FILE__, __LINE__);
138 break;
139 }
140
141 /* Remap the trampoline table to directly follow the config page */
142 vm_prot_t cur_prot;
143 vm_prot_t max_prot;
144
145 vm_address_t trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page;
146 #ifdef __arm__
147 /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */
148 trampoline_page_template &= ~1UL;
149 #endif
150
151 kt =
152 vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0, FALSE,
153 mach_task_self (), trampoline_page_template, FALSE,
154 &cur_prot, &max_prot, VM_INHERIT_SHARE);
155
156 /* If we lost access to the destination trampoline page, drop our config allocation mapping and retry */
157 if (kt != KERN_SUCCESS)
158 {
159 /* Log unexpected failures */
160 if (kt != KERN_NO_SPACE)
161 {
162 fprintf (stderr, "vm_remap() failure: %d at %s:%d\n", kt,
163 __FILE__, __LINE__);
164 }
165
166 vm_deallocate (mach_task_self (), config_page, PAGE_SIZE);
167 continue;
168 }
169
170 /* We have valid trampoline and config pages */
171 table = calloc (1, sizeof (ffi_trampoline_table));
172 table->free_count = FFI_TRAMPOLINE_COUNT;
173 table->config_page = config_page;
174 table->trampoline_page = trampoline_page;
175
176 /* Create and initialize the free list */
177 table->free_list_pool =
178 calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry));
179
180 uint16_t i;
181 for (i = 0; i < table->free_count; i++)
182 {
183 ffi_trampoline_table_entry *entry = &table->free_list_pool[i];
184 entry->trampoline =
185 (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE));
186
187 if (i < table->free_count - 1)
188 entry->next = &table->free_list_pool[i + 1];
189 }
190
191 table->free_list = table->free_list_pool;
192 }
193
194 return table;
195 }
196
/* Allocate a closure of SIZE bytes plus one trampoline table entry,
   storing the entry's executable trampoline address through *CODE.
   Returns the writable closure pointer, or NULL if either the closure
   or a trampoline table cannot be allocated.  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  /* Create the closure (before taking the lock; malloc is independent). */
  ffi_closure *closure = malloc (size);
  if (closure == NULL)
    return NULL;

  pthread_mutex_lock (&ffi_trampoline_lock);

  /* Check for an active trampoline table with available entries. */
  ffi_trampoline_table *table = ffi_trampoline_tables;
  if (table == NULL || table->free_list == NULL)
    {
      table = ffi_trampoline_table_alloc ();
      if (table == NULL)
        {
          pthread_mutex_unlock (&ffi_trampoline_lock);
          free (closure);
          return NULL;
        }

      /* Insert the new table at the top of the list */
      table->next = ffi_trampoline_tables;
      if (table->next != NULL)
        table->next->prev = table;

      ffi_trampoline_tables = table;
    }

  /* Claim the free entry (table is the current list head here). */
  ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list;
  ffi_trampoline_tables->free_list = entry->next;
  ffi_trampoline_tables->free_count--;
  entry->next = NULL;

  pthread_mutex_unlock (&ffi_trampoline_lock);

  /* Initialize the return values */
  *code = entry->trampoline;
  closure->trampoline_table = table;
  closure->trampoline_table_entry = entry;

  return closure;
}
242
243 void
244 ffi_closure_free (void *ptr)
245 {
246 ffi_closure *closure = ptr;
247
248 pthread_mutex_lock (&ffi_trampoline_lock);
249
250 /* Fetch the table and entry references */
251 ffi_trampoline_table *table = closure->trampoline_table;
252 ffi_trampoline_table_entry *entry = closure->trampoline_table_entry;
253
254 /* Return the entry to the free list */
255 entry->next = table->free_list;
256 table->free_list = entry;
257 table->free_count++;
258
259 /* If all trampolines within this table are free, and at least one other table exists, deallocate
260 * the table */
261 if (table->free_count == FFI_TRAMPOLINE_COUNT
262 && ffi_trampoline_tables != table)
263 {
264 /* Remove from the list */
265 if (table->prev != NULL)
266 table->prev->next = table->next;
267
268 if (table->next != NULL)
269 table->next->prev = table->prev;
270
271 /* Deallocate pages */
272 kern_return_t kt;
273 kt = vm_deallocate (mach_task_self (), table->config_page, PAGE_SIZE);
274 if (kt != KERN_SUCCESS)
275 fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
276 __FILE__, __LINE__);
277
278 kt =
279 vm_deallocate (mach_task_self (), table->trampoline_page, PAGE_SIZE);
280 if (kt != KERN_SUCCESS)
281 fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
282 __FILE__, __LINE__);
283
284 /* Deallocate free list */
285 free (table->free_list_pool);
286 free (table);
287 }
288 else if (ffi_trampoline_tables != table)
289 {
290 /* Otherwise, bump this table to the top of the list */
291 table->prev = NULL;
292 table->next = ffi_trampoline_tables;
293 if (ffi_trampoline_tables != NULL)
294 ffi_trampoline_tables->prev = table;
295
296 ffi_trampoline_tables = table;
297 }
298
299 pthread_mutex_unlock (&ffi_trampoline_lock);
300
301 /* Free the closure */
302 free (closure);
303 }
304
305 #endif
306
// Per-target implementation; it is unclear what can reasonably be shared between two OS/architecture implementations.
308
309 #elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */
310
311 #define USE_LOCKS 1
312 #define USE_DL_PREFIX 1
313 #ifdef __GNUC__
314 #ifndef USE_BUILTIN_FFS
315 #define USE_BUILTIN_FFS 1
316 #endif
317 #endif
318
319 /* We need to use mmap, not sbrk. */
320 #define HAVE_MORECORE 0
321
322 /* We could, in theory, support mremap, but it wouldn't buy us anything. */
323 #define HAVE_MREMAP 0
324
325 /* We have no use for this, so save some code and data. */
326 #define NO_MALLINFO 1
327
328 /* We need all allocations to be in regular segments, otherwise we
329 lose track of the corresponding code address. */
330 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
331
332 /* Don't allocate more than a page unless needed. */
333 #define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize)
334
335 #include <sys/types.h>
336 #include <sys/stat.h>
337 #include <fcntl.h>
338 #include <errno.h>
339 #ifndef _MSC_VER
340 #include <unistd.h>
341 #endif
342 #include <string.h>
343 #include <stdio.h>
344 #if !defined(X86_WIN32) && !defined(X86_WIN64)
345 #ifdef HAVE_MNTENT
346 #include <mntent.h>
347 #endif /* HAVE_MNTENT */
348 #include <sys/param.h>
349 #include <pthread.h>
350
351 /* We don't want sys/mman.h to be included after we redefine mmap and
352 dlmunmap. */
353 #include <sys/mman.h>
354 #define LACKS_SYS_MMAN_H 1
355
356 #if FFI_MMAP_EXEC_SELINUX
357 #include <sys/statfs.h>
358 #include <stdlib.h>
359
360 static int selinux_enabled = -1;
361
/* Detect whether SELinux is active on this system: first by probing
   /selinux for the selinuxfs magic, then by scanning /proc/mounts for
   a selinuxfs mount.  Returns 1 if active, 0 otherwise.  */
static int
selinux_enabled_check (void)
{
  struct statfs sfs;
  FILE *mounts;
  char *line = NULL;
  size_t cap = 0;
  int enabled = 0;

  /* Fast path: /selinux mounted with the selinuxfs magic number. */
  if (statfs ("/selinux", &sfs) >= 0
      && (unsigned int) sfs.f_type == 0xf97cff8cU)
    return 1;

  mounts = fopen ("/proc/mounts", "r");
  if (mounts == NULL)
    return 0;

  while (getline (&line, &cap, mounts) >= 0)
    {
      /* Each line is "device mountpoint fstype ..."; inspect field 3. */
      char *field = strchr (line, ' ');
      if (field == NULL)
        break;
      field = strchr (field + 1, ' ');
      if (field == NULL)
        break;
      if (strncmp (field + 1, "selinuxfs ", 10) == 0)
        {
          enabled = 1;
          break;
        }
    }

  free (line);
  fclose (mounts);
  return enabled;
}
395
396 #define is_selinux_enabled() (selinux_enabled >= 0 ? selinux_enabled \
397 : (selinux_enabled = selinux_enabled_check ()))
398
399 #else
400
401 #define is_selinux_enabled() 0
402
403 #endif /* !FFI_MMAP_EXEC_SELINUX */
404
/* On PaX-enabled kernels with MPROTECT enabled we cannot use PROT_EXEC. */
406 #ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX
407 #include <stdlib.h>
408
409 static int emutramp_enabled = -1;
410
/* Check /proc/self/status for a PaX flags line and report whether the
   trampoline-emulation flag ('E' in the second flag position) is set.
   Returns 1 when emutramp is active, 0 otherwise or on any error.  */
static int
emutramp_enabled_check (void)
{
  FILE *status;
  char *line = NULL;
  size_t cap = 0;
  int enabled = 0;

  status = fopen ("/proc/self/status", "r");
  if (status == NULL)
    return 0;

  while (getline (&line, &cap, status) != -1)
    {
      if (strncmp (line, "PaX:", 4) == 0)
        {
          char flag;
          /* Skip the "PaX:" word and the first flag character, then
             read the second flag character. */
          if (sscanf (line, "%*s %*c%c", &flag) == 1)
            enabled = (flag == 'E');
          break;
        }
    }

  free (line);
  fclose (status);
  return enabled;
}
435
436 #define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \
437 : (emutramp_enabled = emutramp_enabled_check ()))
438 #endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
439
440 #elif defined (__CYGWIN__) || defined(__INTERIX)
441
442 #include <sys/mman.h>
443
444 /* Cygwin is Linux-like, but not quite that Linux-like. */
445 #define is_selinux_enabled() 0
446
447 #endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */
448
449 #ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX
450 #define is_emutramp_enabled() 0
451 #endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
452
453 /* Declare all functions defined in dlmalloc.c as static. */
454 static void *dlmalloc(size_t);
455 static void dlfree(void*);
456 static void *dlcalloc(size_t, size_t) MAYBE_UNUSED;
457 static void *dlrealloc(void *, size_t) MAYBE_UNUSED;
458 static void *dlmemalign(size_t, size_t) MAYBE_UNUSED;
459 static void *dlvalloc(size_t) MAYBE_UNUSED;
460 static int dlmallopt(int, int) MAYBE_UNUSED;
461 static size_t dlmalloc_footprint(void) MAYBE_UNUSED;
462 static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED;
463 static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED;
464 static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED;
465 static void *dlpvalloc(size_t) MAYBE_UNUSED;
466 static int dlmalloc_trim(size_t) MAYBE_UNUSED;
467 static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED;
468 static void dlmalloc_stats(void) MAYBE_UNUSED;
469
470 #if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
471 /* Use these for mmap and munmap within dlmalloc.c. */
472 static void *dlmmap(void *, size_t, int, int, int, off_t);
473 static int dlmunmap(void *, size_t);
474 #endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
475
476 #define mmap dlmmap
477 #define munmap dlmunmap
478
479 #include "dlmalloc.c"
480
481 #undef mmap
482 #undef munmap
483
484 #if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
485
486 /* A mutex used to synchronize access to *exec* variables in this file. */
487 static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER;
488
489 /* A file descriptor of a temporary file from which we'll map
490 executable pages. */
491 static int execfd = -1;
492
493 /* The amount of space already allocated from the temporary file. */
494 static size_t execsize = 0;
495
/* Create a temporary file from the template NAME and immediately
   unlink it, so only the returned descriptor keeps it alive.
   FLAGS is honored only when mkostemp is available.  Returns the
   open descriptor, or -1 on failure.  */
static int
open_temp_exec_file_name (char *name, int flags)
{
  int fd;

#ifdef HAVE_MKOSTEMP
  fd = mkostemp (name, flags);
#else
  fd = mkstemp (name);
#endif

  if (fd == -1)
    return -1;

  unlink (name);
  return fd;
}
513
/* Open a temporary file in the named directory.  Returns an open file
   descriptor or -1.  The file is anonymous: created via O_TMPFILE when
   supported, otherwise created with mkstemp and unlinked immediately
   (see open_temp_exec_file_name).  */
static int
open_temp_exec_file_dir (const char *dir)
{
  static const char suffix[] = "/ffiXXXXXX";
  int lendir, flags;
  char *tempname;
#ifdef O_TMPFILE
  int fd;
#endif

#ifdef O_CLOEXEC
  flags = O_CLOEXEC;
#else
  flags = 0;
#endif

#ifdef O_TMPFILE
  /* Prefer O_TMPFILE: the file never appears in the directory at all. */
  fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700);
  /* If the running system does not support the O_TMPFILE flag then retry without it. */
  if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) {
    return fd;
  } else {
    /* Clear the failure before falling back to the mkstemp path. */
    errno = 0;
  }
#endif

  lendir = strlen (dir);
  tempname = __builtin_alloca (lendir + sizeof (suffix));

  /* NOTE(review): alloca cannot return NULL, so this check appears
     vestigial -- kept for byte-compatibility. */
  if (!tempname)
    return -1;

  memcpy (tempname, dir, lendir);
  /* suffix includes its NUL terminator, completing the template string. */
  memcpy (tempname + lendir, suffix, sizeof (suffix));

  return open_temp_exec_file_name (tempname, flags);
}
552
/* Open a temporary file in the directory named by the environment
   variable ENVVAR.  Returns an open descriptor, or -1 when the
   variable is unset or the open fails.  */
static int
open_temp_exec_file_env (const char *envvar)
{
  const char *dir = getenv (envvar);

  return dir ? open_temp_exec_file_dir (dir) : -1;
}
565
566 #ifdef HAVE_MNTENT
/* Open a temporary file in an executable and writable mount point
   listed in the mounts file.  Subsequent calls with the same mounts
   keep searching for mount points in the same file.  Providing NULL
   as the mounts file closes the file.  Returns an open fd or -1.  */
static int
open_temp_exec_file_mnt (const char *mounts)
{
  /* Iteration state is static so a later call resumes the scan where
     the previous one stopped (pointer identity of MOUNTS detects a
     change of file). */
  static const char *last_mounts;
  static FILE *last_mntent;

  if (mounts != last_mounts)
    {
      /* Different (or NULL) mounts file: reset the iteration. */
      if (last_mntent)
        endmntent (last_mntent);

      last_mounts = mounts;

      if (mounts)
        last_mntent = setmntent (mounts, "r");
      else
        last_mntent = NULL;
    }

  if (!last_mntent)
    return -1;

  for (;;)
    {
      int fd;
      struct mntent mnt;
      char buf[MAXPATHLEN * 3];

      if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)) == NULL)
        return -1;

      /* Skip mounts that are read-only, non-executable, or where this
         process cannot write. */
      if (hasmntopt (&mnt, "ro")
          || hasmntopt (&mnt, "noexec")
          || access (mnt.mnt_dir, W_OK))
        continue;

      fd = open_temp_exec_file_dir (mnt.mnt_dir);

      if (fd != -1)
        return fd;
    }
}
613 #endif /* HAVE_MNTENT */
614
/* Instructions to look for a location to hold a temporary file that
   can be mapped in for execution.  Entries are tried in order; REPEAT
   marks multi-call entries (mount-table scans) that are re-invoked
   with the same argument until they fail.  */
static struct
{
  int (*func)(const char *);  /* returns an fd, or -1 to move on */
  const char *arg;            /* directory path, env var name, or mounts file */
  int repeat;                 /* nonzero: call again with the same arg */
} open_temp_exec_file_opts[] = {
  { open_temp_exec_file_env, "TMPDIR", 0 },
  { open_temp_exec_file_dir, "/tmp", 0 },
  { open_temp_exec_file_dir, "/var/tmp", 0 },
  { open_temp_exec_file_dir, "/dev/shm", 0 },
  { open_temp_exec_file_env, "HOME", 0 },
#ifdef HAVE_MNTENT
  { open_temp_exec_file_mnt, "/etc/mtab", 1 },
  { open_temp_exec_file_mnt, "/proc/mounts", 1 },
#endif /* HAVE_MNTENT */
};
633
634 /* Current index into open_temp_exec_file_opts. */
635 static int open_temp_exec_file_opts_idx = 0;
636
637 /* Reset a current multi-call func, then advances to the next entry.
638 If we're at the last, go back to the first and return nonzero,
639 otherwise return zero. */
640 static int
641 open_temp_exec_file_opts_next (void)
642 {
643 if (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
644 open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func (NULL);
645
646 open_temp_exec_file_opts_idx++;
647 if (open_temp_exec_file_opts_idx
648 == (sizeof (open_temp_exec_file_opts)
649 / sizeof (*open_temp_exec_file_opts)))
650 {
651 open_temp_exec_file_opts_idx = 0;
652 return 1;
653 }
654
655 return 0;
656 }
657
658 /* Return a file descriptor of a temporary zero-sized file in a
659 writable and executable filesystem. */
660 static int
661 open_temp_exec_file (void)
662 {
663 int fd;
664
665 do
666 {
667 fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func
668 (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg);
669
670 if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat
671 || fd == -1)
672 {
673 if (open_temp_exec_file_opts_next ())
674 break;
675 }
676 }
677 while (fd == -1);
678
679 return fd;
680 }
681
/* Map in a chunk of memory from the temporary exec file into separate
   locations in the virtual memory address space, one writable and one
   executable.  Returns the address of the writable portion, after
   storing an offset to the corresponding executable portion at the
   last word of the requested chunk.  */
static void *
dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset)
{
  void *ptr;

  if (execfd == -1)
    {
      /* No exec file yet: restart the candidate-directory search. */
      open_temp_exec_file_opts_idx = 0;
    retry_open:
      execfd = open_temp_exec_file ();
      if (execfd == -1)
        return MFAIL;
    }

  /* The caller-supplied offset is ignored; chunks are always appended
     at the current end of the temporary file. */
  offset = execsize;

  /* Grow the file to cover the new chunk. */
  if (ftruncate (execfd, offset + length))
    return MFAIL;

  /* The mapping must be MAP_SHARED so the writable and executable
     views of the file observe each other's stores. */
  flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS);
  flags |= MAP_SHARED;

  /* First establish the executable (non-writable) view. */
  ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC,
              flags, execfd, offset);
  if (ptr == MFAIL)
    {
      if (!offset)
        {
          /* Nothing was ever mapped from this file, so its filesystem
             is presumably unusable (e.g. noexec) -- try the next
             candidate location. */
          close (execfd);
          goto retry_open;
        }
      /* Undo the file growth before reporting failure. */
      ftruncate (execfd, offset);
      return MFAIL;
    }
  else if (!offset
           && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
    /* First successful mapping from a repeatable (mount-scan) entry:
       advance past it so its scan state is reset for any future file.
       NOTE(review): intent inferred from the repeat machinery -- confirm. */
    open_temp_exec_file_opts_next ();

  /* Now map the writable view at the caller's requested address. */
  start = mmap (start, length, prot, flags, execfd, offset);

  if (start == MFAIL)
    {
      munmap (ptr, length);
      ftruncate (execfd, offset);
      return start;
    }

  /* Record the writable->executable displacement in the last word of
     the chunk so the exec address can be recovered later. */
  mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start;

  execsize += length;

  return start;
}
740
/* Map in a writable and executable chunk of memory if possible.
   Failing that, fall back to dlmmap_locked.  */
static void *
dlmmap (void *start, size_t length, int prot,
        int flags, int fd, off_t offset)
{
  void *ptr;

  /* dlmalloc only ever requests fresh anonymous read/write pages;
     anything else is unsupported here. */
  assert (start == NULL && length % malloc_getpagesize == 0
          && prot == (PROT_READ | PROT_WRITE)
          && flags == (MAP_PRIVATE | MAP_ANONYMOUS)
          && fd == -1 && offset == 0);

  /* Under PaX emutramp, W+X mappings are refused but trampolines are
     emulated by the kernel, so map without PROT_EXEC. */
  if (execfd == -1 && is_emutramp_enabled ())
    {
      ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset);
      return ptr;
    }

  if (execfd == -1 && !is_selinux_enabled ())
    {
      /* Optimistically try a single writable+executable mapping. */
      ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset);

      if (ptr != MFAIL || (errno != EPERM && errno != EACCES))
        /* Cool, no need to mess with separate segments. */
        return ptr;

      /* If MREMAP_DUP is ever introduced and implemented, try mmap
         with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with
         MREMAP_DUP and prot at this point. */
    }

  if (execsize == 0 || execfd == -1)
    {
      /* First use of the temp-file scheme: serialize initialization
         of the shared execfd/execsize state. */
      pthread_mutex_lock (&open_temp_exec_file_mutex);
      ptr = dlmmap_locked (start, length, prot, flags, offset);
      pthread_mutex_unlock (&open_temp_exec_file_mutex);

      return ptr;
    }

  /* NOTE(review): this path calls dlmmap_locked WITHOUT taking
     open_temp_exec_file_mutex; presumably dlmalloc's own lock
     (USE_LOCKS) already serializes callers -- confirm. */
  return dlmmap_locked (start, length, prot, flags, offset);
}
784
/* Release memory at the given address, as well as the corresponding
   executable page if it's separate.  Returns 0 on success, like
   munmap.  */
static int
dlmunmap (void *start, size_t length)
{
  /* We don't bother decreasing execsize or truncating the file, since
     we can't quite tell whether we're unmapping the end of the file.
     We don't expect frequent deallocation anyway.  If we did, we
     could locate pages in the file by writing to the pages being
     deallocated and checking that the file contents change.
     Yuck.  */
  msegmentptr seg = segment_holding (gm, start);
  void *code;

  /* If this segment has a distinct executable alias, unmap it first;
     a failure there aborts the whole operation. */
  if (seg && (code = add_segment_exec_offset (start, seg)) != start)
    {
      int ret = munmap (code, length);
      if (ret)
        return ret;
    }

  return munmap (start, length);
}
808
809 #if FFI_CLOSURE_FREE_CODE
810 /* Return segment holding given code address. */
811 static msegmentptr
812 segment_holding_code (mstate m, char* addr)
813 {
814 msegmentptr sp = &m->seg;
815 for (;;) {
816 if (addr >= add_segment_exec_offset (sp->base, sp)
817 && addr < add_segment_exec_offset (sp->base, sp) + sp->size)
818 return sp;
819 if ((sp = sp->next) == 0)
820 return 0;
821 }
822 }
823 #endif
824
825 #endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
826
827 /* Allocate a chunk of memory with the given size. Returns a pointer
828 to the writable address, and sets *CODE to the executable
829 corresponding virtual address. */
830 void *
831 ffi_closure_alloc (size_t size, void **code)
832 {
833 void *ptr;
834
835 if (!code)
836 return NULL;
837
838 ptr = dlmalloc (size);
839
840 if (ptr)
841 {
842 msegmentptr seg = segment_holding (gm, ptr);
843
844 *code = add_segment_exec_offset (ptr, seg);
845 }
846
847 return ptr;
848 }
849
/* Release a chunk of memory allocated with ffi_closure_alloc.  If
   FFI_CLOSURE_FREE_CODE is nonzero, the given address can be the
   writable or the executable address given.  Otherwise, only the
   writable address can be provided here.  */
void
ffi_closure_free (void *ptr)
{
#if FFI_CLOSURE_FREE_CODE
  /* Map an executable-alias address back to its writable counterpart
     before handing it to the allocator. */
  msegmentptr seg = segment_holding_code (gm, ptr);

  if (seg)
    ptr = sub_segment_exec_offset (ptr, seg);
#endif

  dlfree (ptr);
}
866
867 # else /* ! FFI_MMAP_EXEC_WRIT */
868
869 /* On many systems, memory returned by malloc is writable and
870 executable, so just use it. */
871
872 #include <stdlib.h>
873
/* Allocate SIZE bytes for a closure.  On this configuration malloc
   memory is already executable, so the writable pointer and the code
   pointer stored through *CODE are the same.  Returns NULL when CODE
   is NULL or malloc fails.  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  void *ptr;

  if (code == NULL)
    return NULL;

  ptr = malloc (size);
  *code = ptr;
  return ptr;
}
882
/* Release a closure allocated by ffi_closure_alloc; it is plain
   malloc memory on this configuration.  */
void
ffi_closure_free (void *ptr)
{
  free (ptr);
}
888
889 # endif /* ! FFI_MMAP_EXEC_WRIT */
890 #endif /* FFI_CLOSURES */