translate-all: use qemu_protect_rwx/none helpers

The helpers require the address and size to be page-aligned, so
do that before calling them.

Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Emilio G. Cota 2017-07-15 02:38:57 -04:00 committed by Richard Henderson
parent 5fa64b3130
commit f51f315a67
1 changed file with 13 additions and 48 deletions

View File

@ -602,63 +602,24 @@ static inline void *split_cross_256mb(void *buf1, size_t size1)
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
__attribute__((aligned(CODE_GEN_ALIGN))); __attribute__((aligned(CODE_GEN_ALIGN)));
# ifdef _WIN32
/*
 * Set the Win32 page protection of [addr, addr + size) to PROT.
 * The previous protection value is discarded and any failure of
 * VirtualProtect() is ignored.
 */
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD discarded;

    VirtualProtect(addr, size, prot, &discarded);
}
/*
 * Make [addr, addr + size) readable, writable and executable
 * via do_protect() with the Win32 PAGE_EXECUTE_READWRITE flag.
 */
static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}
/*
 * Revoke all access to [addr, addr + size) via do_protect() with
 * PAGE_NOACCESS — used below to arm the guard page after the buffer.
 */
static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
/*
 * Apply PROT to the host pages spanning [addr, addr + size).
 * mprotect() requires page-aligned arguments, so the range is widened
 * to the enclosing host-page boundaries: the start is aligned down and
 * the end is rounded up.  Any mprotect() failure is ignored.
 */
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t first = (uintptr_t)addr & qemu_real_host_page_mask;
    uintptr_t last = ROUND_UP((uintptr_t)addr + size,
                              qemu_real_host_page_size);

    mprotect((void *)first, last - first, prot);
}
/*
 * Make [addr, addr + size) readable, writable and executable
 * via do_protect() with PROT_READ | PROT_WRITE | PROT_EXEC.
 */
static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}
/*
 * Revoke all access to [addr, addr + size) via do_protect() with
 * PROT_NONE — used below to arm the guard page after the buffer.
 */
static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */
static inline void *alloc_code_gen_buffer(void) static inline void *alloc_code_gen_buffer(void)
{ {
void *buf = static_code_gen_buffer; void *buf = static_code_gen_buffer;
void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
size_t full_size, size; size_t full_size, size;
/* The size of the buffer, rounded down to end on a page boundary. */ /* page-align the beginning and end of the buffer */
full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer)) buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
& qemu_real_host_page_mask) - (uintptr_t)buf; end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
/* Reserve a guard page. */ /* Reserve a guard page. */
full_size = end - buf;
size = full_size - qemu_real_host_page_size; size = full_size - qemu_real_host_page_size;
/* Honor a command-line option limiting the size of the buffer. */ /* Honor a command-line option limiting the size of the buffer. */
if (size > tcg_ctx->code_gen_buffer_size) { if (size > tcg_ctx->code_gen_buffer_size) {
size = (((uintptr_t)buf + tcg_ctx->code_gen_buffer_size) size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
& qemu_real_host_page_mask) - (uintptr_t)buf; qemu_real_host_page_size);
} }
tcg_ctx->code_gen_buffer_size = size; tcg_ctx->code_gen_buffer_size = size;
@ -669,8 +630,12 @@ static inline void *alloc_code_gen_buffer(void)
} }
#endif #endif
map_exec(buf, size); if (qemu_mprotect_rwx(buf, size)) {
map_none(buf + size, qemu_real_host_page_size); abort();
}
if (qemu_mprotect_none(buf + size, qemu_real_host_page_size)) {
abort();
}
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
return buf; return buf;