diff --git a/ext/opcache/shared_alloc_mmap.c b/ext/opcache/shared_alloc_mmap.c
index 1840b214705f..7ebedd40ed9c 100644
--- a/ext/opcache/shared_alloc_mmap.c
+++ b/ext/opcache/shared_alloc_mmap.c
@@ -20,6 +20,7 @@
  */
 
 #include "zend_shared_alloc.h"
+#include "jit/zend_jit.h"
 
 #ifdef USE_MMAP
 
@@ -45,7 +46,7 @@
 # define MAP_HUGETLB MAP_ALIGNED_SUPER
 #endif
 
-#if (defined(__linux__) || defined(__FreeBSD__)) && (defined(__x86_64__) || defined (__aarch64__)) && !defined(__SANITIZE_ADDRESS__)
+#if defined(HAVE_JIT) && (defined(__linux__) || defined(__FreeBSD__)) && (defined(__x86_64__) || defined (__aarch64__)) && !defined(__SANITIZE_ADDRESS__)
 static void *find_prefered_mmap_base(size_t requested_size)
 {
 	size_t huge_page_size = 2 * 1024 * 1024;
@@ -76,8 +77,13 @@ static void *find_prefered_mmap_base(size_t requested_size)
 		}
 		if ((uintptr_t)execute_ex >= start) {
 			/* the current segment lays before PHP .text segment or PHP .text segment itself */
+			/* Search for candidates at the end of the free segment, near the .text segment,
+			   to prevent candidates from being missed due to a large hole */
 			if (last_free_addr + requested_size <= start) {
-				last_candidate = last_free_addr;
+				last_candidate = ZEND_MM_ALIGNED_SIZE_EX(start - requested_size, huge_page_size);
+				if (last_candidate + requested_size > start) {
+					last_candidate -= huge_page_size;
+				}
 			}
 			if ((uintptr_t)execute_ex < end) {
 				/* the current segment is PHP .text segment itself */
@@ -128,7 +134,10 @@ static void *find_prefered_mmap_base(size_t requested_size)
 		if ((uintptr_t)execute_ex >= e_start) {
 			/* the current segment lays before PHP .text segment or PHP .text segment itself */
 			if (last_free_addr + requested_size <= e_start) {
-				last_candidate = last_free_addr;
+				last_candidate = ZEND_MM_ALIGNED_SIZE_EX(e_start - requested_size, huge_page_size);
+				if (last_candidate + requested_size > e_start) {
+					last_candidate -= huge_page_size;
+				}
 			}
 			if ((uintptr_t)execute_ex < e_end) {
 				/* the current segment is PHP .text segment itself */
@@ -180,8 +189,17 @@ static int create_segments(size_t requested_size, zend_shared_segment ***shared_
 #ifdef PROT_MAX
 	flags |= PROT_MAX(PROT_READ | PROT_WRITE | PROT_EXEC);
 #endif
-#if (defined(__linux__) || defined(__FreeBSD__)) && (defined(__x86_64__) || defined (__aarch64__)) && !defined(__SANITIZE_ADDRESS__)
-	void *hint = find_prefered_mmap_base(requested_size);
+#if defined(HAVE_JIT) && (defined(__linux__) || defined(__FreeBSD__)) && (defined(__x86_64__) || defined (__aarch64__)) && !defined(__SANITIZE_ADDRESS__)
+	void *hint;
+	if (JIT_G(enabled) && JIT_G(buffer_size)
+			&& zend_jit_check_support() == SUCCESS) {
+		hint = find_prefered_mmap_base(requested_size);
+	} else {
+		/* Do not use a hint if JIT is not enabled, as it only benefits the JIT and
+		 * may be unsafe when the hole after the heap is the only candidate
+		 * (e.g. in non-PIE builds) (GH-13775). */
+		hint = MAP_FAILED;
+	}
 	if (hint != MAP_FAILED) {
 # ifdef MAP_HUGETLB
 		size_t huge_page_size = 2 * 1024 * 1024;
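
The candidate selection above can be exercised in isolation. Below is a minimal standalone sketch (not part of the patch) of the new placement logic: instead of using the start of the free hole, the candidate is pushed as close as possible below the next mapped segment, aligned to a 2 MB huge page, stepping back one huge page if rounding would overlap it. The `ALIGN_UP` macro, `pick_candidate` helper, and the addresses in `main` are hypothetical; `ALIGN_UP` assumes the same round-up-to-power-of-two behaviour as Zend's `ZEND_MM_ALIGNED_SIZE_EX`.

```c
#include <stdint.h>
#include <stdio.h>

/* Round "size" up to the next multiple of "alignment" (a power of two);
 * assumed to mirror ZEND_MM_ALIGNED_SIZE_EX(). */
#define ALIGN_UP(size, alignment) \
	(((size) + ((alignment) - 1)) & ~((uintptr_t)(alignment) - 1))

/* Illustrative only: pick a 2 MB-aligned candidate as close as possible below
 * "start" (the beginning of the next mapped segment, e.g. PHP .text).
 * "last_free_addr" is the start of the free hole, as in the patch.
 * Returns 0 when the hole cannot fit the request. */
static uintptr_t pick_candidate(uintptr_t last_free_addr, uintptr_t start, size_t requested_size)
{
	const size_t huge_page_size = 2 * 1024 * 1024;

	if (last_free_addr + requested_size > start) {
		return 0; /* hole too small */
	}
	uintptr_t candidate = ALIGN_UP(start - requested_size, huge_page_size);
	if (candidate + requested_size > start) {
		candidate -= huge_page_size; /* rounding up overshot the segment, step back */
	}
	return candidate;
}

int main(void)
{
	/* Hypothetical layout: a very large free hole ending at a .text-like segment. */
	uintptr_t start = 0x7f0000000000;       /* next mapped segment */
	uintptr_t last_free_addr = 0x100000000; /* start of the hole, far below */
	size_t requested_size = 96 * 1024 * 1024;

	printf("old candidate: %#lx\n", (unsigned long)last_free_addr);
	printf("new candidate: %#lx\n",
	       (unsigned long)pick_candidate(last_free_addr, start, requested_size));
	return 0;
}
```

With a hole this large, the old behaviour would have returned an address several terabytes below the segment containing `execute_ex`, while the sketch places the candidate directly beneath it, which is what keeps the shared segment (and hence the JIT buffer) within near-branch range of PHP's .text segment.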