From d9c5858570a57f374b71216c5da39ee381fa92f5 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Fri, 12 Feb 2021 10:48:32 -0800
Subject: [PATCH] tcg: Introduce target-specific page data for user-only

This data can be allocated by page_alloc_target_data() and
released by page_set_flags(start, end, prot | PAGE_RESET).

This data will be used to hold tag memory for AArch64 MTE.

Reviewed-by: Peter Maydell
Signed-off-by: Richard Henderson
Message-id: 20210212184902.1251044-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell
---
 accel/tcg/translate-all.c | 28 ++++++++++++++++++++++++++
 include/exec/cpu-all.h    | 42 +++++++++++++++++++++++++++++++++------
 linux-user/mmap.c         |  4 +++-
 linux-user/syscall.c      |  4 ++--
 4 files changed, 69 insertions(+), 9 deletions(-)

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 81d4c83f22..bba9c8e0b3 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -114,6 +114,7 @@ typedef struct PageDesc {
     unsigned int code_write_count;
 #else
     unsigned long flags;
+    void *target_data;
 #endif
 #ifndef CONFIG_USER_ONLY
     QemuSpin lock;
@@ -2740,6 +2741,7 @@ int page_get_flags(target_ulong address)
 void page_set_flags(target_ulong start, target_ulong end, int flags)
 {
     target_ulong addr, len;
+    bool reset_target_data;
 
     /* This function should never be called with addresses outside the
        guest address space.  If this assert fires, it probably indicates
@@ -2754,6 +2756,8 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
     if (flags & PAGE_WRITE) {
         flags |= PAGE_WRITE_ORG;
     }
+    reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
+    flags &= ~PAGE_RESET;
 
     for (addr = start, len = end - start;
          len != 0;
@@ -2767,10 +2771,34 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
             p->first_tb) {
             tb_invalidate_phys_page(addr, 0);
         }
+        if (reset_target_data && p->target_data) {
+            g_free(p->target_data);
+            p->target_data = NULL;
+        }
         p->flags = flags;
     }
 }
 
+void *page_get_target_data(target_ulong address)
+{
+    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
+    return p ? p->target_data : NULL;
+}
+
+void *page_alloc_target_data(target_ulong address, size_t size)
+{
+    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
+    void *ret = NULL;
+
+    if (p->flags & PAGE_VALID) {
+        ret = p->target_data;
+        if (!ret) {
+            p->target_data = ret = g_malloc0(size);
+        }
+    }
+    return ret;
+}
+
 int page_check_range(target_ulong start, target_ulong len, int flags)
 {
     PageDesc *p;
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index babf0a8959..6421892830 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -256,15 +256,21 @@ extern intptr_t qemu_host_page_mask;
 #define PAGE_EXEC      0x0004
 #define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
 #define PAGE_VALID     0x0008
-/* original state of the write flag (used when tracking self-modifying
-   code */
+/*
+ * Original state of the write flag (used when tracking self-modifying code)
+ */
 #define PAGE_WRITE_ORG 0x0010
-/* Invalidate the TLB entry immediately, helpful for s390x
- * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */
-#define PAGE_WRITE_INV 0x0040
+/*
+ * Invalidate the TLB entry immediately, helpful for s390x
+ * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
+ */
+#define PAGE_WRITE_INV 0x0020
+/* For use with page_set_flags: page is being replaced; target_data cleared. */
+#define PAGE_RESET     0x0040
+
 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
 /* FIXME: Code that sets/uses this is broken and needs to go away.  */
-#define PAGE_RESERVED 0x0020
+#define PAGE_RESERVED  0x0100
 #endif
 /* Target-specific bits that will be used via page_get_flags().  */
 #define PAGE_TARGET_1  0x0080
@@ -279,6 +285,30 @@ int walk_memory_regions(void *, walk_memory_regions_fn);
 int page_get_flags(target_ulong address);
 void page_set_flags(target_ulong start, target_ulong end, int flags);
 int page_check_range(target_ulong start, target_ulong len, int flags);
+
+/**
+ * page_alloc_target_data(address, size)
+ * @address: guest virtual address
+ * @size: size of data to allocate
+ *
+ * Allocate @size bytes of out-of-band data to associate with the
+ * guest page at @address.  If the page is not mapped, NULL will
+ * be returned.  If there is existing data associated with @address,
+ * no new memory will be allocated.
+ *
+ * The memory will be freed when the guest page is deallocated,
+ * e.g. with the munmap system call.
+ */
+void *page_alloc_target_data(target_ulong address, size_t size);
+
+/**
+ * page_get_target_data(address)
+ * @address: guest virtual address
+ *
+ * Return any out-of-band memory associated with the guest page
+ * at @address, as per page_alloc_target_data.
+ */
+void *page_get_target_data(target_ulong address);
 #endif
 
 CPUArchState *cpu_copy(CPUArchState *env);
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 1c9faef476..ac0624f31a 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -599,6 +599,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
         }
     }
  the_end1:
+    page_flags |= PAGE_RESET;
     page_set_flags(start, start + len, page_flags);
  the_end:
     trace_target_mmap_complete(start);
@@ -794,7 +795,8 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
         new_addr = h2g(host_addr);
         prot = page_get_flags(old_addr);
         page_set_flags(old_addr, old_addr + old_size, 0);
-        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
+        page_set_flags(new_addr, new_addr + new_size,
+                       prot | PAGE_VALID | PAGE_RESET);
     }
     tb_invalidate_phys_range(new_addr, new_addr + new_size);
     mmap_unlock();
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 36b0901055..0c2d660bc4 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -4643,8 +4643,8 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
     raddr=h2g((unsigned long)host_raddr);
 
     page_set_flags(raddr, raddr + shm_info.shm_segsz,
-                   PAGE_VALID | PAGE_READ |
-                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
+                   PAGE_VALID | PAGE_RESET | PAGE_READ |
+                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
 
     for (i = 0; i < N_SHM_REGIONS; i++) {
         if (!shm_regions[i].in_use) {
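
---

For readers new to this interface, here is a minimal sketch of how a target
might consume the hooks added above in a user-only build. It is not part of
the series: TAG_GRANULE and the helper names tags_for_page() and
existing_tags_for_page() are hypothetical, chosen only to suggest the AArch64
MTE use case mentioned in the commit message; only page_alloc_target_data(),
page_get_target_data(), TARGET_PAGE_MASK/TARGET_PAGE_SIZE and the PAGE_RESET
behaviour come from QEMU and this patch.

/*
 * Illustrative sketch only -- not part of this patch.  Assumes a
 * user-only QEMU translation unit (e.g. under target/arm/) where
 * "cpu.h" pulls in "exec/cpu-all.h", which declares the new hooks.
 * TAG_GRANULE and the helper names are hypothetical.
 */
#include "qemu/osdep.h"
#include "cpu.h"

/* Hypothetical: one tag byte per 16-byte granule, MTE-style. */
#define TAG_GRANULE 16

/*
 * Lazily allocate tag storage for the page containing @addr.
 * page_alloc_target_data() returns NULL if the page is not mapped,
 * and returns the existing buffer if one was already allocated.
 */
static uint8_t *tags_for_page(target_ulong addr)
{
    return page_alloc_target_data(addr & TARGET_PAGE_MASK,
                                  TARGET_PAGE_SIZE / TAG_GRANULE);
}

/*
 * Look up tags without allocating.  NULL means no tags have been
 * stored yet, which a target would typically treat as all-zero.
 */
static uint8_t *existing_tags_for_page(target_ulong addr)
{
    return page_get_target_data(addr & TARGET_PAGE_MASK);
}

The release side needs no target code: when the guest replaces a mapping,
target_mmap(), target_mremap() and do_shmat() now pass PAGE_RESET to
page_set_flags(), which frees any target_data attached to the affected pages.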