tcg: Introduce target-specific page data for user-only
This data can be allocated by page_alloc_target_data() and released by
page_set_flags(start, end, prot | PAGE_RESET).

This data will be used to hold tag memory for AArch64 MTE.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210212184902.1251044-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit d9c5858570
parent 8ba4bca570
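For orientation, here is a minimal sketch (not part of the commit) of how a
target might use the two new calls. The record type and helper names are
invented for the example; it assumes a user-only build where
page_alloc_target_data(), page_get_target_data() and page_set_flags() are
visible, e.g. via the declarations this patch adds to the PAGE_* header below.

/* Illustrative only: hypothetical per-page record and helpers. */
typedef struct MyPageRecord {
    uint8_t bytes[64];                  /* arbitrary per-page payload */
} MyPageRecord;

static MyPageRecord *my_record_for_page(target_ulong addr)
{
    /* Per the new API: reuses any existing record, returns a freshly
       zeroed one otherwise, and NULL if the page is not mapped. */
    return page_alloc_target_data(addr, sizeof(MyPageRecord));
}

static void my_remap_range(target_ulong start, target_ulong end, int prot)
{
    /* PAGE_RESET makes page_set_flags() free any attached record,
       exactly as target_mmap()/target_mremap() now request on remap. */
    page_set_flags(start, end, prot | PAGE_VALID | PAGE_RESET);
}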
@@ -114,6 +114,7 @@ typedef struct PageDesc {
     unsigned int code_write_count;
 #else
     unsigned long flags;
+    void *target_data;
 #endif
 #ifndef CONFIG_USER_ONLY
     QemuSpin lock;
@@ -2740,6 +2741,7 @@ int page_get_flags(target_ulong address)
 void page_set_flags(target_ulong start, target_ulong end, int flags)
 {
     target_ulong addr, len;
+    bool reset_target_data;
 
     /* This function should never be called with addresses outside the
        guest address space.  If this assert fires, it probably indicates
@@ -2754,6 +2756,8 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
     if (flags & PAGE_WRITE) {
         flags |= PAGE_WRITE_ORG;
     }
+    reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
+    flags &= ~PAGE_RESET;
 
     for (addr = start, len = end - start;
          len != 0;
@@ -2767,10 +2771,34 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
             p->first_tb) {
             tb_invalidate_phys_page(addr, 0);
         }
+        if (reset_target_data && p->target_data) {
+            g_free(p->target_data);
+            p->target_data = NULL;
+        }
         p->flags = flags;
     }
 }
 
+void *page_get_target_data(target_ulong address)
+{
+    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
+    return p ? p->target_data : NULL;
+}
+
+void *page_alloc_target_data(target_ulong address, size_t size)
+{
+    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
+    void *ret = NULL;
+
+    if (p->flags & PAGE_VALID) {
+        ret = p->target_data;
+        if (!ret) {
+            p->target_data = ret = g_malloc0(size);
+        }
+    }
+    return ret;
+}
+
 int page_check_range(target_ulong start, target_ulong len, int flags)
 {
     PageDesc *p;
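Since the commit message says this storage will hold AArch64 MTE tag memory,
a rough sizing note may help: MTE keeps one 4-bit tag per 16-byte granule,
i.e. two tags per byte, so a page needs TARGET_PAGE_SIZE / 32 bytes of tags.
A hypothetical helper built on the function above (illustrative only, not
the eventual aarch64 code):

/* Hypothetical MTE sizing sketch; assumes addr is a mapped, valid page. */
static uint8_t *mte_tags_for_page(target_ulong addr)
{
    /* One 4-bit tag per 16-byte granule, two tags packed per byte. */
    size_t tag_bytes = TARGET_PAGE_SIZE / (16 * 2);

    /* Existing tag memory is returned as-is; otherwise it is allocated
       zero-initialized, so every granule starts out with tag 0. */
    return page_alloc_target_data(addr, tag_bytes);
}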
@@ -256,15 +256,21 @@ extern intptr_t qemu_host_page_mask;
 #define PAGE_EXEC      0x0004
 #define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
 #define PAGE_VALID     0x0008
-/* original state of the write flag (used when tracking self-modifying
-   code */
+/*
+ * Original state of the write flag (used when tracking self-modifying code)
+ */
 #define PAGE_WRITE_ORG 0x0010
-/* Invalidate the TLB entry immediately, helpful for s390x
- * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */
-#define PAGE_WRITE_INV 0x0040
+/*
+ * Invalidate the TLB entry immediately, helpful for s390x
+ * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
+ */
+#define PAGE_WRITE_INV 0x0020
+/* For use with page_set_flags: page is being replaced; target_data cleared. */
+#define PAGE_RESET     0x0040
+
 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
 /* FIXME: Code that sets/uses this is broken and needs to go away. */
-#define PAGE_RESERVED  0x0020
+#define PAGE_RESERVED  0x0100
 #endif
 /* Target-specific bits that will be used via page_get_flags(). */
 #define PAGE_TARGET_1  0x0080
@@ -279,6 +285,30 @@ int walk_memory_regions(void *, walk_memory_regions_fn);
 int page_get_flags(target_ulong address);
 void page_set_flags(target_ulong start, target_ulong end, int flags);
 int page_check_range(target_ulong start, target_ulong len, int flags);
+
+/**
+ * page_alloc_target_data(address, size)
+ * @address: guest virtual address
+ * @size: size of data to allocate
+ *
+ * Allocate @size bytes of out-of-band data to associate with the
+ * guest page at @address.  If the page is not mapped, NULL will
+ * be returned.  If there is existing data associated with @address,
+ * no new memory will be allocated.
+ *
+ * The memory will be freed when the guest page is deallocated,
+ * e.g. with the munmap system call.
+ */
+void *page_alloc_target_data(target_ulong address, size_t size);
+
+/**
+ * page_get_target_data(address)
+ * @address: guest virtual address
+ *
+ * Return any out-of-band memory associated with the guest page
+ * at @address, as per page_alloc_target_data().
+ */
+void *page_get_target_data(target_ulong address);
 #endif
 
 CPUArchState *cpu_copy(CPUArchState *env);
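A small, hypothetical check of the contract documented above (names invented;
g_assert is GLib's assertion macro as used throughout QEMU):

/* Illustrative only: 'mapped' must have PAGE_VALID set, 'unmapped' must
   have no mapping at all. */
static void demo_target_data_contract(target_ulong mapped,
                                      target_ulong unmapped)
{
    void *a = page_alloc_target_data(mapped, 128);
    void *b = page_alloc_target_data(mapped, 128);

    g_assert(a != NULL);
    g_assert(a == b);                             /* existing data reused */
    g_assert(page_get_target_data(mapped) == a);  /* same buffer returned */
    g_assert(page_get_target_data(unmapped) == NULL);
}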
@@ -599,6 +599,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
         }
     }
  the_end1:
+    page_flags |= PAGE_RESET;
     page_set_flags(start, start + len, page_flags);
  the_end:
     trace_target_mmap_complete(start);
@@ -794,7 +795,8 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
         new_addr = h2g(host_addr);
         prot = page_get_flags(old_addr);
         page_set_flags(old_addr, old_addr + old_size, 0);
-        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
+        page_set_flags(new_addr, new_addr + new_size,
+                       prot | PAGE_VALID | PAGE_RESET);
     }
     tb_invalidate_phys_range(new_addr, new_addr + new_size);
     mmap_unlock();
@@ -4643,8 +4643,8 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
     raddr=h2g((unsigned long)host_raddr);
 
     page_set_flags(raddr, raddr + shm_info.shm_segsz,
-                   PAGE_VALID | PAGE_READ |
-                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
+                   PAGE_VALID | PAGE_RESET | PAGE_READ |
+                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
 
     for (i = 0; i < N_SHM_REGIONS; i++) {
         if (!shm_regions[i].in_use) {