Update README with a new port
[libffi.git] / src / closures.c
1 /* -----------------------------------------------------------------------
2 closures.c - Copyright (c) 2007, 2009, 2010 Red Hat, Inc.
3 Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc
4 Copyright (c) 2011 Plausible Labs Cooperative, Inc.
5
6 Code to allocate and deallocate memory for closures.
7
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 ``Software''), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
15
16 The above copyright notice and this permission notice shall be included
17 in all copies or substantial portions of the Software.
18
19 THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
23 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24 WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 DEALINGS IN THE SOFTWARE.
27 ----------------------------------------------------------------------- */
28
29 #if defined __linux__ && !defined _GNU_SOURCE
30 #define _GNU_SOURCE 1
31 #endif
32
33 #include <fficonfig.h>
34 #include <ffi.h>
35 #include <ffi_common.h>
36
37 #if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE
38 # if __gnu_linux__ && !defined(__ANDROID__)
39 /* This macro indicates it may be forbidden to map anonymous memory
40 with both write and execute permission. Code compiled when this
41 option is defined will attempt to map such pages once, but if it
42 fails, it falls back to creating a temporary file in a writable and
43 executable filesystem and mapping pages from it into separate
44 locations in the virtual memory space, one location writable and
45 another executable. */
46 # define FFI_MMAP_EXEC_WRIT 1
47 # define HAVE_MNTENT 1
48 # endif
49 # if defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)
50 /* Windows systems may have Data Execution Protection (DEP) enabled,
51 which requires the use of VirtualMalloc/VirtualFree to alloc/free
52 executable memory. */
53 # define FFI_MMAP_EXEC_WRIT 1
54 # endif
55 #endif
56
57 #if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX
58 # ifdef __linux__
59 /* When defined to 1 check for SELinux and if SELinux is active,
60 don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that
61 might cause audit messages. */
62 # define FFI_MMAP_EXEC_SELINUX 1
63 # endif
64 #endif
65
66 #if FFI_CLOSURES
67
68 #if FFI_EXEC_TRAMPOLINE_TABLE
69
70 #ifdef __MACH__
71
72 #include <mach/mach.h>
73 #include <pthread.h>
74 #include <stdio.h>
75 #include <stdlib.h>
76
77 extern void *ffi_closure_trampoline_table_page;
78
79 typedef struct ffi_trampoline_table ffi_trampoline_table;
80 typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry;
81
82 struct ffi_trampoline_table
83 {
84 /* contiguous writable and executable pages */
85 vm_address_t config_page;
86 vm_address_t trampoline_page;
87
88 /* free list tracking */
89 uint16_t free_count;
90 ffi_trampoline_table_entry *free_list;
91 ffi_trampoline_table_entry *free_list_pool;
92
93 ffi_trampoline_table *prev;
94 ffi_trampoline_table *next;
95 };
96
97 struct ffi_trampoline_table_entry
98 {
99 void *(*trampoline) ();
100 ffi_trampoline_table_entry *next;
101 };
102
103 /* Total number of trampolines that fit in one trampoline table */
104 #define FFI_TRAMPOLINE_COUNT (PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE)
105
106 static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER;
107 static ffi_trampoline_table *ffi_trampoline_tables = NULL;
108
109 static ffi_trampoline_table *
110 ffi_trampoline_table_alloc ()
111 {
112 ffi_trampoline_table *table = NULL;
113
114 /* Loop until we can allocate two contiguous pages */
115 while (table == NULL)
116 {
117 vm_address_t config_page = 0x0;
118 kern_return_t kt;
119
120 /* Try to allocate two pages */
121 kt =
122 vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2,
123 VM_FLAGS_ANYWHERE);
124 if (kt != KERN_SUCCESS)
125 {
126 fprintf (stderr, "vm_allocate() failure: %d at %s:%d\n", kt,
127 __FILE__, __LINE__);
128 break;
129 }
130
131 /* Now drop the second half of the allocation to make room for the trampoline table */
132 vm_address_t trampoline_page = config_page + PAGE_MAX_SIZE;
133 kt = vm_deallocate (mach_task_self (), trampoline_page, PAGE_MAX_SIZE);
134 if (kt != KERN_SUCCESS)
135 {
136 fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
137 __FILE__, __LINE__);
138 break;
139 }
140
141 /* Remap the trampoline table to directly follow the config page */
142 vm_prot_t cur_prot;
143 vm_prot_t max_prot;
144
145 vm_address_t trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page;
146 #ifdef __arm__
147 /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */
148 trampoline_page_template &= ~1UL;
149 #endif
150
151 kt =
152 vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0, FALSE,
153 mach_task_self (), trampoline_page_template, FALSE,
154 &cur_prot, &max_prot, VM_INHERIT_SHARE);
155
156 /* If we lost access to the destination trampoline page, drop our config allocation mapping and retry */
157 if (kt != KERN_SUCCESS)
158 {
159 /* Log unexpected failures */
160 if (kt != KERN_NO_SPACE)
161 {
162 fprintf (stderr, "vm_remap() failure: %d at %s:%d\n", kt,
163 __FILE__, __LINE__);
164 }
165
166 vm_deallocate (mach_task_self (), config_page, PAGE_SIZE);
167 continue;
168 }
169
170 /* We have valid trampoline and config pages */
171 table = calloc (1, sizeof (ffi_trampoline_table));
172 table->free_count = FFI_TRAMPOLINE_COUNT;
173 table->config_page = config_page;
174 table->trampoline_page = trampoline_page;
175
176 /* Create and initialize the free list */
177 table->free_list_pool =
178 calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry));
179
180 uint16_t i;
181 for (i = 0; i < table->free_count; i++)
182 {
183 ffi_trampoline_table_entry *entry = &table->free_list_pool[i];
184 entry->trampoline =
185 (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE));
186
187 if (i < table->free_count - 1)
188 entry->next = &table->free_list_pool[i + 1];
189 }
190
191 table->free_list = table->free_list_pool;
192 }
193
194 return table;
195 }
196
197 void *
198 ffi_closure_alloc (size_t size, void **code)
199 {
200 /* Create the closure */
201 ffi_closure *closure = malloc (size);
202 if (closure == NULL)
203 return NULL;
204
205 pthread_mutex_lock (&ffi_trampoline_lock);
206
207 /* Check for an active trampoline table with available entries. */
208 ffi_trampoline_table *table = ffi_trampoline_tables;
209 if (table == NULL || table->free_list == NULL)
210 {
211 table = ffi_trampoline_table_alloc ();
212 if (table == NULL)
213 {
214 free (closure);
215 return NULL;
216 }
217
218 /* Insert the new table at the top of the list */
219 table->next = ffi_trampoline_tables;
220 if (table->next != NULL)
221 table->next->prev = table;
222
223 ffi_trampoline_tables = table;
224 }
225
226 /* Claim the free entry */
227 ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list;
228 ffi_trampoline_tables->free_list = entry->next;
229 ffi_trampoline_tables->free_count--;
230 entry->next = NULL;
231
232 pthread_mutex_unlock (&ffi_trampoline_lock);
233
234 /* Initialize the return values */
235 *code = entry->trampoline;
236 closure->trampoline_table = table;
237 closure->trampoline_table_entry = entry;
238
239 return closure;
240 }
241
242 void
243 ffi_closure_free (void *ptr)
244 {
245 ffi_closure *closure = ptr;
246
247 pthread_mutex_lock (&ffi_trampoline_lock);
248
249 /* Fetch the table and entry references */
250 ffi_trampoline_table *table = closure->trampoline_table;
251 ffi_trampoline_table_entry *entry = closure->trampoline_table_entry;
252
253 /* Return the entry to the free list */
254 entry->next = table->free_list;
255 table->free_list = entry;
256 table->free_count++;
257
258 /* If all trampolines within this table are free, and at least one other table exists, deallocate
259 * the table */
260 if (table->free_count == FFI_TRAMPOLINE_COUNT
261 && ffi_trampoline_tables != table)
262 {
263 /* Remove from the list */
264 if (table->prev != NULL)
265 table->prev->next = table->next;
266
267 if (table->next != NULL)
268 table->next->prev = table->prev;
269
270 /* Deallocate pages */
271 kern_return_t kt;
272 kt = vm_deallocate (mach_task_self (), table->config_page, PAGE_SIZE);
273 if (kt != KERN_SUCCESS)
274 fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
275 __FILE__, __LINE__);
276
277 kt =
278 vm_deallocate (mach_task_self (), table->trampoline_page, PAGE_SIZE);
279 if (kt != KERN_SUCCESS)
280 fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
281 __FILE__, __LINE__);
282
283 /* Deallocate free list */
284 free (table->free_list_pool);
285 free (table);
286 }
287 else if (ffi_trampoline_tables != table)
288 {
289 /* Otherwise, bump this table to the top of the list */
290 table->prev = NULL;
291 table->next = ffi_trampoline_tables;
292 if (ffi_trampoline_tables != NULL)
293 ffi_trampoline_tables->prev = table;
294
295 ffi_trampoline_tables = table;
296 }
297
298 pthread_mutex_unlock (&ffi_trampoline_lock);
299
300 /* Free the closure */
301 free (closure);
302 }
303
304 #endif
305
306 // Per-target implementation; It's unclear what can reasonable be shared between two OS/architecture implementations.
307
308 #elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */
309
310 #define USE_LOCKS 1
311 #define USE_DL_PREFIX 1
312 #ifdef __GNUC__
313 #ifndef USE_BUILTIN_FFS
314 #define USE_BUILTIN_FFS 1
315 #endif
316 #endif
317
318 /* We need to use mmap, not sbrk. */
319 #define HAVE_MORECORE 0
320
321 /* We could, in theory, support mremap, but it wouldn't buy us anything. */
322 #define HAVE_MREMAP 0
323
324 /* We have no use for this, so save some code and data. */
325 #define NO_MALLINFO 1
326
327 /* We need all allocations to be in regular segments, otherwise we
328 lose track of the corresponding code address. */
329 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
330
331 /* Don't allocate more than a page unless needed. */
332 #define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize)
333
334 #if FFI_CLOSURE_TEST
335 /* Don't release single pages, to avoid a worst-case scenario of
336 continuously allocating and releasing single pages, but release
337 pairs of pages, which should do just as well given that allocations
338 are likely to be small. */
339 #define DEFAULT_TRIM_THRESHOLD ((size_t)malloc_getpagesize)
340 #endif
341
342 #include <sys/types.h>
343 #include <sys/stat.h>
344 #include <fcntl.h>
345 #include <errno.h>
346 #ifndef _MSC_VER
347 #include <unistd.h>
348 #endif
349 #include <string.h>
350 #include <stdio.h>
351 #if !defined(X86_WIN32) && !defined(X86_WIN64)
352 #ifdef HAVE_MNTENT
353 #include <mntent.h>
354 #endif /* HAVE_MNTENT */
355 #include <sys/param.h>
356 #include <pthread.h>
357
358 /* We don't want sys/mman.h to be included after we redefine mmap and
359 dlmunmap. */
360 #include <sys/mman.h>
361 #define LACKS_SYS_MMAN_H 1
362
363 #if FFI_MMAP_EXEC_SELINUX
364 #include <sys/statfs.h>
365 #include <stdlib.h>
366
367 static int selinux_enabled = -1;
368
369 static int
370 selinux_enabled_check (void)
371 {
372 struct statfs sfs;
373 FILE *f;
374 char *buf = NULL;
375 size_t len = 0;
376
377 if (statfs ("/selinux", &sfs) >= 0
378 && (unsigned int) sfs.f_type == 0xf97cff8cU)
379 return 1;
380 f = fopen ("/proc/mounts", "r");
381 if (f == NULL)
382 return 0;
383 while (getline (&buf, &len, f) >= 0)
384 {
385 char *p = strchr (buf, ' ');
386 if (p == NULL)
387 break;
388 p = strchr (p + 1, ' ');
389 if (p == NULL)
390 break;
391 if (strncmp (p + 1, "selinuxfs ", 10) == 0)
392 {
393 free (buf);
394 fclose (f);
395 return 1;
396 }
397 }
398 free (buf);
399 fclose (f);
400 return 0;
401 }
402
403 #define is_selinux_enabled() (selinux_enabled >= 0 ? selinux_enabled \
404 : (selinux_enabled = selinux_enabled_check ()))
405
406 #else
407
408 #define is_selinux_enabled() 0
409
410 #endif /* !FFI_MMAP_EXEC_SELINUX */
411
412 /* On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. */
413 #ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX
414 #include <stdlib.h>
415
416 static int emutramp_enabled = -1;
417
/* Check /proc/self/status for PaX emulated-trampoline support: the
   second flag character of the "PaX:" line is 'E' when enabled.
   Returns 1 if enabled, 0 otherwise (including on any read failure). */
static int
emutramp_enabled_check (void)
{
  FILE *status;
  char *line = NULL;
  size_t line_cap = 0;
  int enabled = 0;

  status = fopen ("/proc/self/status", "r");
  if (status == NULL)
    return 0;

  while (getline (&line, &line_cap, status) != -1)
    {
      if (strncmp (line, "PaX:", 4) != 0)
        continue;

      char flag;
      /* Skip "PaX:" and the first flag character, read the second. */
      if (sscanf (line, "%*s %*c%c", &flag) == 1)
        enabled = (flag == 'E');
      break;
    }

  free (line);
  fclose (status);
  return enabled;
}
442
443 #define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \
444 : (emutramp_enabled = emutramp_enabled_check ()))
445 #endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
446
447 #elif defined (__CYGWIN__) || defined(__INTERIX)
448
449 #include <sys/mman.h>
450
451 /* Cygwin is Linux-like, but not quite that Linux-like. */
452 #define is_selinux_enabled() 0
453
454 #endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */
455
456 #ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX
457 #define is_emutramp_enabled() 0
458 #endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
459
460 /* Declare all functions defined in dlmalloc.c as static. */
461 static void *dlmalloc(size_t);
462 static void dlfree(void*);
463 static void *dlcalloc(size_t, size_t) MAYBE_UNUSED;
464 static void *dlrealloc(void *, size_t) MAYBE_UNUSED;
465 static void *dlmemalign(size_t, size_t) MAYBE_UNUSED;
466 static void *dlvalloc(size_t) MAYBE_UNUSED;
467 static int dlmallopt(int, int) MAYBE_UNUSED;
468 static size_t dlmalloc_footprint(void) MAYBE_UNUSED;
469 static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED;
470 static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED;
471 static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED;
472 static void *dlpvalloc(size_t) MAYBE_UNUSED;
473 static int dlmalloc_trim(size_t) MAYBE_UNUSED;
474 static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED;
475 static void dlmalloc_stats(void) MAYBE_UNUSED;
476
477 #if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
478 /* Use these for mmap and munmap within dlmalloc.c. */
479 static void *dlmmap(void *, size_t, int, int, int, off_t);
480 static int dlmunmap(void *, size_t);
481 #endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
482
483 #define mmap dlmmap
484 #define munmap dlmunmap
485
486 #include "dlmalloc.c"
487
488 #undef mmap
489 #undef munmap
490
491 #if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
492
493 /* A mutex used to synchronize access to *exec* variables in this file. */
494 static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER;
495
496 /* A file descriptor of a temporary file from which we'll map
497 executable pages. */
498 static int execfd = -1;
499
500 /* The amount of space already allocated from the temporary file. */
501 static size_t execsize = 0;
502
/* Create a temporary file from the template NAME and immediately unlink
   it, so the file disappears once the descriptor is closed.  FLAGS is
   honored only where mkostemp is available.  Returns the fd or -1. */
static int
open_temp_exec_file_name (char *name, int flags)
{
#ifdef HAVE_MKOSTEMP
  int fd = mkostemp (name, flags);
#else
  int fd = mkstemp (name);
#endif

  /* Unlink right away: the mapping keeps the inode alive, but no name
     remains visible in the filesystem. */
  if (fd != -1)
    unlink (name);

  return fd;
}
520
/* Open a temporary file in the named directory.  Prefers an anonymous
   O_TMPFILE file where supported; otherwise falls back to a mkstemp
   template that is unlinked immediately.  Returns the fd or -1. */
static int
open_temp_exec_file_dir (const char *dir)
{
  static const char suffix[] = "/ffiXXXXXX";
  int lendir, flags;
  char *tempname;
#ifdef O_TMPFILE
  int fd;
#endif

#ifdef O_CLOEXEC
  flags = O_CLOEXEC;
#else
  flags = 0;
#endif

#ifdef O_TMPFILE
  /* An O_TMPFILE file never has a name at all. */
  fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700);
  /* If the running system does not support the O_TMPFILE flag then retry
     without it: old kernels report EINVAL or EISDIR, some filesystems
     EOPNOTSUPP. */
  if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) {
    return fd;
  } else {
    errno = 0;
  }
#endif

  lendir = strlen (dir);
  tempname = __builtin_alloca (lendir + sizeof (suffix));

  /* NOTE(review): __builtin_alloca does not signal failure by returning
     NULL, so this check is effectively dead; kept as-is. */
  if (!tempname)
    return -1;

  /* Build "<dir>/ffiXXXXXX"; sizeof (suffix) copies the terminator. */
  memcpy (tempname, dir, lendir);
  memcpy (tempname + lendir, suffix, sizeof (suffix));

  return open_temp_exec_file_name (tempname, flags);
}
559
/* Open a temporary file in the directory named by the environment
   variable ENVVAR; returns -1 if the variable is unset or the open
   fails. */
static int
open_temp_exec_file_env (const char *envvar)
{
  const char *dir = getenv (envvar);

  return dir ? open_temp_exec_file_dir (dir) : -1;
}
572
#ifdef HAVE_MNTENT
/* Open a temporary file in an executable and writable mount point
   listed in the mounts file.  Subsequent calls with the same mounts
   keep searching for mount points in the same file.  Providing NULL
   as the mounts file closes the file. */
static int
open_temp_exec_file_mnt (const char *mounts)
{
  /* Scan state persists across calls — this function is a `repeat'
     entry in open_temp_exec_file_opts (see below). */
  static const char *last_mounts;
  static FILE *last_mntent;

  if (mounts != last_mounts)
    {
      if (last_mntent)
        endmntent (last_mntent);

      last_mounts = mounts;

      if (mounts)
        last_mntent = setmntent (mounts, "r");
      else
        last_mntent = NULL;
    }

  if (!last_mntent)
    return -1;

  for (;;)
    {
      int fd;
      struct mntent mnt;
      char buf[MAXPATHLEN * 3];

      /* End of the mounts file: nothing usable found. */
      if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)) == NULL)
        return -1;

      /* Skip mounts that are read-only, non-executable, or where we
         cannot write. */
      if (hasmntopt (&mnt, "ro")
          || hasmntopt (&mnt, "noexec")
          || access (mnt.mnt_dir, W_OK))
        continue;

      fd = open_temp_exec_file_dir (mnt.mnt_dir);

      if (fd != -1)
        return fd;
    }
}
#endif /* HAVE_MNTENT */
621
/* Instructions to look for a location to hold a temporary file that
   can be mapped in for execution.  Tried in order; `repeat' marks
   multi-call entries (mount-table scans) that may be invoked again to
   resume searching where they left off. */
static struct
{
  /* Returns an open fd for ARG, or -1.  Repeat entries accept a NULL
     ARG to release their internal scan state. */
  int (*func)(const char *);
  const char *arg;
  int repeat;
} open_temp_exec_file_opts[] = {
  { open_temp_exec_file_env, "TMPDIR", 0 },
  { open_temp_exec_file_dir, "/tmp", 0 },
  { open_temp_exec_file_dir, "/var/tmp", 0 },
  { open_temp_exec_file_dir, "/dev/shm", 0 },
  { open_temp_exec_file_env, "HOME", 0 },
#ifdef HAVE_MNTENT
  { open_temp_exec_file_mnt, "/etc/mtab", 1 },
  { open_temp_exec_file_mnt, "/proc/mounts", 1 },
#endif /* HAVE_MNTENT */
};
640
641 /* Current index into open_temp_exec_file_opts. */
642 static int open_temp_exec_file_opts_idx = 0;
643
644 /* Reset a current multi-call func, then advances to the next entry.
645 If we're at the last, go back to the first and return nonzero,
646 otherwise return zero. */
647 static int
648 open_temp_exec_file_opts_next (void)
649 {
650 if (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
651 open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func (NULL);
652
653 open_temp_exec_file_opts_idx++;
654 if (open_temp_exec_file_opts_idx
655 == (sizeof (open_temp_exec_file_opts)
656 / sizeof (*open_temp_exec_file_opts)))
657 {
658 open_temp_exec_file_opts_idx = 0;
659 return 1;
660 }
661
662 return 0;
663 }
664
/* Return a file descriptor of a temporary zero-sized file in a
   writable and executable filesystem.  Walks open_temp_exec_file_opts;
   returns -1 once a full pass over the options has failed. */
static int
open_temp_exec_file (void)
{
  int fd;

  do
    {
      fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func
	(open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg);

      /* A single-shot entry always advances; a repeat entry is retried
	 until it fails.  opts_next() returning nonzero means the index
	 wrapped — every option has been tried once, so give up. */
      if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat
	  || fd == -1)
	{
	  if (open_temp_exec_file_opts_next ())
	    break;
	}
    }
  while (fd == -1);

  return fd;
}
688
/* Map in a chunk of memory from the temporary exec file into separate
   locations in the virtual memory address space, one writable and one
   executable.  Returns the address of the writable portion, after
   storing an offset to the corresponding executable portion at the
   last word of the requested chunk.  Access to execfd/execsize must be
   serialized by the caller (see dlmmap). */
static void *
dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset)
{
  void *ptr;

  if (execfd == -1)
    {
      /* Restart the search for a usable temp location from the top. */
      open_temp_exec_file_opts_idx = 0;
    retry_open:
      execfd = open_temp_exec_file ();
      if (execfd == -1)
	return MFAIL;
    }

  /* The incoming OFFSET is ignored: chunks are always appended at the
     current end of the backing file. */
  offset = execsize;

  /* Grow the backing file to cover the new chunk. */
  if (ftruncate (execfd, offset + length))
    return MFAIL;

  /* Both views must share pages with the file, so force MAP_SHARED. */
  flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS);
  flags |= MAP_SHARED;

  /* Map the executable view first (read/exec, never writable). */
  ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC,
	      flags, execfd, offset);
  if (ptr == MFAIL)
    {
      if (!offset)
	{
	  /* Nothing has been mapped from this file yet, so the
	     filesystem is likely unsuitable (e.g. noexec): close it
	     and try the next candidate location. */
	  close (execfd);
	  goto retry_open;
	}
      /* NOTE(review): ftruncate result deliberately unchecked —
	 shrinking the file back is best-effort cleanup. */
      ftruncate (execfd, offset);
      return MFAIL;
    }
  else if (!offset
	   && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat)
    /* First success on a repeat (mount-scan) entry: advance past it so
       its internal scan state is released. */
    open_temp_exec_file_opts_next ();

  /* Now map the writable view at the address dlmalloc asked for. */
  start = mmap (start, length, prot, flags, execfd, offset);

  if (start == MFAIL)
    {
      munmap (ptr, length);
      ftruncate (execfd, offset);
      return start;
    }

  /* Record, in the last word of the writable chunk, the offset from the
     writable view to the executable one. */
  mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start;

  execsize += length;

  return start;
}
747
/* Map in a writable and executable chunk of memory if possible.
   Failing that, fall back to dlmmap_locked.  Installed as dlmalloc's
   mmap (see the #define above), so it supplies all segment memory. */
static void *
dlmmap (void *start, size_t length, int prot,
	int flags, int fd, off_t offset)
{
  void *ptr;

  /* dlmalloc only ever requests anonymous, page-multiple, RW memory. */
  assert (start == NULL && length % malloc_getpagesize == 0
	  && prot == (PROT_READ | PROT_WRITE)
	  && flags == (MAP_PRIVATE | MAP_ANONYMOUS)
	  && fd == -1 && offset == 0);

#if FFI_CLOSURE_TEST
  printf ("mapping in %zi\n", length);
#endif

  /* Under PaX emulated trampolines no executable mapping is needed:
     hand back plain writable memory. */
  if (execfd == -1 && is_emutramp_enabled ())
    {
      ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset);
      return ptr;
    }

  /* Optimistic path: a single mapping that is both writable and
     executable.  Skipped under SELinux, where anonymous W+X maps may
     be denied with audit noise. */
  if (execfd == -1 && !is_selinux_enabled ())
    {
      ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset);

      if (ptr != MFAIL || (errno != EPERM && errno != EACCES))
	/* Cool, no need to mess with separate segments. */
	return ptr;

      /* If MREMAP_DUP is ever introduced and implemented, try mmap
	 with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with
	 MREMAP_DUP and prot at this point. */
    }

  /* Double-mapping path: take the temp-file mutex while the shared
     execfd/execsize state is first being established. */
  if (execsize == 0 || execfd == -1)
    {
      pthread_mutex_lock (&open_temp_exec_file_mutex);
      ptr = dlmmap_locked (start, length, prot, flags, offset);
      pthread_mutex_unlock (&open_temp_exec_file_mutex);

      return ptr;
    }

  /* NOTE(review): this call runs without open_temp_exec_file_mutex;
     it appears to rely on dlmalloc's own lock (USE_LOCKS) for
     serialization — confirm before changing locking here. */
  return dlmmap_locked (start, length, prot, flags, offset);
}
795
796 /* Release memory at the given address, as well as the corresponding
797 executable page if it's separate. */
798 static int
799 dlmunmap (void *start, size_t length)
800 {
801 /* We don't bother decreasing execsize or truncating the file, since
802 we can't quite tell whether we're unmapping the end of the file.
803 We don't expect frequent deallocation anyway. If we did, we
804 could locate pages in the file by writing to the pages being
805 deallocated and checking that the file contents change.
806 Yuck. */
807 msegmentptr seg = segment_holding (gm, start);
808 void *code;
809
810 #if FFI_CLOSURE_TEST
811 printf ("unmapping %zi\n", length);
812 #endif
813
814 if (seg && (code = add_segment_exec_offset (start, seg)) != start)
815 {
816 int ret = munmap (code, length);
817 if (ret)
818 return ret;
819 }
820
821 return munmap (start, length);
822 }
823
#if FFI_CLOSURE_FREE_CODE
/* Return the segment whose executable view contains ADDR, or 0 if no
   segment does. */
static msegmentptr
segment_holding_code (mstate m, char* addr)
{
  msegmentptr sp;

  for (sp = &m->seg; sp != 0; sp = sp->next)
    {
      char *exec_base = add_segment_exec_offset (sp->base, sp);
      if (addr >= exec_base && addr < exec_base + sp->size)
        return sp;
    }

  return 0;
}
#endif
839
840 #endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
841
842 /* Allocate a chunk of memory with the given size. Returns a pointer
843 to the writable address, and sets *CODE to the executable
844 corresponding virtual address. */
845 void *
846 ffi_closure_alloc (size_t size, void **code)
847 {
848 void *ptr;
849
850 if (!code)
851 return NULL;
852
853 ptr = dlmalloc (size);
854
855 if (ptr)
856 {
857 msegmentptr seg = segment_holding (gm, ptr);
858
859 *code = add_segment_exec_offset (ptr, seg);
860 }
861
862 return ptr;
863 }
864
/* Release a chunk of memory allocated with ffi_closure_alloc.  If
   FFI_CLOSURE_FREE_CODE is nonzero, the given address can be the
   writable or the executable address given.  Otherwise, only the
   writable address can be provided here. */
void
ffi_closure_free (void *ptr)
{
#if FFI_CLOSURE_FREE_CODE
  /* Translate an executable-view address back to its writable twin. */
  msegmentptr seg = segment_holding_code (gm, ptr);

  if (seg != NULL)
    ptr = sub_segment_exec_offset (ptr, seg);
#endif

  dlfree (ptr);
}
881
882
883 #if FFI_CLOSURE_TEST
/* Do some internal sanity testing to make sure allocation and
   deallocation of pages are working as intended.  The sizes are chosen
   to force page-granular segment growth and trimming; progress is
   traced via the printf calls here and in dlmmap/dlmunmap. */
int main ()
{
  void *p[3];
  /* GET/PUT wrap dlmalloc/dlfree with a trace line; arguments are
     parenthesized against macro-expansion surprises. */
#define GET(idx, len) do { p[idx] = dlmalloc (len); printf ("allocated %zi for p[%i]\n", (len), (idx)); } while (0)
#define PUT(idx) do { printf ("freeing p[%i]\n", (idx)); dlfree (p[idx]); } while (0)
  GET (0, malloc_getpagesize / 2);
  GET (1, 2 * malloc_getpagesize - 64 * sizeof (void*));
  PUT (1);
  GET (1, 2 * malloc_getpagesize);
  GET (2, malloc_getpagesize / 2);
  PUT (1);
  PUT (0);
  PUT (2);
  return 0;
}
901 #endif /* FFI_CLOSURE_TEST */
902 # else /* ! FFI_MMAP_EXEC_WRIT */
903
904 /* On many systems, memory returned by malloc is writable and
905 executable, so just use it. */
906
907 #include <stdlib.h>
908
/* Allocate SIZE bytes of closure memory.  On these targets malloc'd
   memory is already executable, so the writable and executable
   addresses coincide: both the return value and *CODE point at the
   same allocation.  Returns NULL if CODE is NULL or malloc fails. */
void *
ffi_closure_alloc (size_t size, void **code)
{
  void *ptr;

  if (code == NULL)
    return NULL;

  ptr = malloc (size);
  *code = ptr;
  return ptr;
}
917
/* Release a closure obtained from ffi_closure_alloc.  The memory came
   straight from malloc, so hand it back to free. */
void
ffi_closure_free (void *ptr)
{
  free (ptr);
}
923
924 # endif /* ! FFI_MMAP_EXEC_WRIT */
925 #endif /* FFI_CLOSURES */