include/exec: Change reserved_va semantics to last byte
Change the semantics to be the last byte of the guest va, rather
than the following byte.  This avoids some overflow conditions.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 95059f9c31 (parent a3a67f54f0)
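As context for the hunks below: with the old semantics, reserved_va held the size of the reservation (the byte following the last guest byte). For a full 32-bit guest that size is 0x100000000, which does not fit in a 32-bit abi_ulong, which is why the old MAX_RESERVED_VA definitions had to "avoid the last page". Storing the last byte instead keeps every value representable. A minimal standalone sketch of the difference, assuming a 64-bit host where unsigned long is 64 bits; the abi_ulong typedef here is only a stand-in, not the QEMU definition:

    /* Sketch only: why a "last byte" value always fits where a size may not. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t abi_ulong;   /* stand-in for a 32-bit guest address type */

    int main(void)
    {
        unsigned long size = 0x100000000ul;   /* 4 GiB guest space (64-bit host) */
        unsigned long last = size - 1;        /* new reserved_va representation  */

        abi_ulong as_size = (abi_ulong)size;  /* truncates: wraps to 0           */
        abi_ulong as_last = (abi_ulong)last;  /* exact: 0xffffffff               */

        printf("size as abi_ulong:      0x%x\n", as_size);
        printf("last byte as abi_ulong: 0x%x\n", as_last);
        return 0;
    }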
@@ -68,13 +68,9 @@ bool have_guest_base;
 # if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
 # if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
      (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/*
- * There are a number of places where we assign reserved_va to a variable
- * of type abi_ulong and expect it to fit. Avoid the last page.
- */
-# define MAX_RESERVED_VA (0xfffffffful & TARGET_PAGE_MASK)
+# define MAX_RESERVED_VA 0xfffffffful
 # else
-# define MAX_RESERVED_VA (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
+# define MAX_RESERVED_VA ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
 # endif
 # else
 # define MAX_RESERVED_VA 0
@@ -466,7 +462,7 @@ int main(int argc, char **argv)
     envlist_free(envlist);
 
     if (reserved_va) {
-        mmap_next_start = reserved_va;
+        mmap_next_start = reserved_va + 1;
     }
 
     {
@@ -234,7 +234,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
     size = HOST_PAGE_ALIGN(size) + alignment;
     end_addr = start + size;
     if (end_addr > reserved_va) {
-        end_addr = reserved_va;
+        end_addr = reserved_va + 1;
     }
     addr = end_addr - qemu_host_page_size;
 
@@ -243,7 +243,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
             if (looped) {
                 return (abi_ulong)-1;
             }
-            end_addr = reserved_va;
+            end_addr = reserved_va + 1;
             addr = end_addr - qemu_host_page_size;
             looped = 1;
             continue;
@@ -152,6 +152,15 @@ static inline void tswap64s(uint64_t *s)
  */
 extern uintptr_t guest_base;
 extern bool have_guest_base;
+
+/*
+ * If non-zero, the guest virtual address space is a contiguous subset
+ * of the host virtual address space, i.e. '-R reserved_va' is in effect
+ * either from the command-line or by default.  The value is the last
+ * byte of the guest address space e.g. UINT32_MAX.
+ *
+ * If zero, the host and guest virtual address spaces are intermingled.
+ */
 extern unsigned long reserved_va;
 
 /*
@@ -171,7 +180,7 @@ extern unsigned long reserved_va;
 #define GUEST_ADDR_MAX_ \
     ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
      UINT32_MAX : ~0ul)
-#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
+#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)
 
 #else
 
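A note on the new GUEST_ADDR_MAX above: "reserved_va ? : GUEST_ADDR_MAX_" uses the GNU C conditional with an omitted middle operand, so it yields reserved_va itself when reserved_va is non-zero, which is now already the last valid guest address. A tiny illustrative sketch, not QEMU code; it requires GCC or Clang for the extension, and the helper name is made up for the example:

    /* Illustration of the GNU "x ? : y" shorthand used by GUEST_ADDR_MAX. */
    #include <assert.h>

    static unsigned long guest_addr_max(unsigned long reserved_va,
                                        unsigned long fallback)
    {
        /* Equivalent to: reserved_va ? reserved_va : fallback,
         * with reserved_va evaluated only once. */
        return reserved_va ? : fallback;
    }

    int main(void)
    {
        assert(guest_addr_max(0xfffffffful, ~0ul) == 0xfffffffful); /* -R in effect */
        assert(guest_addr_max(0, ~0ul) == ~0ul);                    /* no -R        */
        return 0;
    }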
@@ -30,7 +30,7 @@ static inline unsigned long arm_max_reserved_va(CPUState *cs)
          * the high addresses. Restrict linux-user to the
          * cached write-back RAM in the system map.
          */
-        return 0x80000000ul;
+        return 0x7ffffffful;
     } else {
         /*
          * We need to be able to map the commpage.
@@ -208,7 +208,7 @@ static bool init_guest_commpage(void)
      * has specified -R reserved_va, which would trigger an assert().
      */
     if (reserved_va != 0 &&
-        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
+        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
         error_report("Cannot allocate vsyscall page");
         exit(EXIT_FAILURE);
     }
@@ -2504,7 +2504,7 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
         if (guest_hiaddr > reserved_va) {
             error_report("%s: requires more than reserved virtual "
                          "address space (0x%" PRIx64 " > 0x%lx)",
-                         image_name, (uint64_t)guest_hiaddr + 1, reserved_va);
+                         image_name, (uint64_t)guest_hiaddr, reserved_va);
             exit(EXIT_FAILURE);
         }
     } else {
@@ -2525,7 +2525,7 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
     if (reserved_va) {
         guest_loaddr = (guest_base >= mmap_min_addr ? 0
                         : mmap_min_addr - guest_base);
-        guest_hiaddr = reserved_va - 1;
+        guest_hiaddr = reserved_va;
     }
 
     /* Reserve the address space for the binary, or reserved_va. */
@@ -2755,7 +2755,7 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
     if (guest_hiaddr > reserved_va) {
         error_report("%s: requires more than reserved virtual "
                      "address space (0x%" PRIx64 " > 0x%lx)",
-                     image_name, (uint64_t)guest_hiaddr + 1, reserved_va);
+                     image_name, (uint64_t)guest_hiaddr, reserved_va);
         exit(EXIT_FAILURE);
     }
 
@@ -2768,17 +2768,17 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
     /* Reserve the memory on the host. */
     assert(guest_base != 0);
     test = g2h_untagged(0);
-    addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
+    addr = mmap(test, reserved_va + 1, PROT_NONE, flags, -1, 0);
     if (addr == MAP_FAILED || addr != test) {
         error_report("Unable to reserve 0x%lx bytes of virtual address "
                      "space at %p (%s) for use as guest address space (check your "
                      "virtual memory ulimit setting, min_mmap_addr or reserve less "
-                     "using -R option)", reserved_va, test, strerror(errno));
+                     "using -R option)", reserved_va + 1, test, strerror(errno));
         exit(EXIT_FAILURE);
     }
 
     qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n",
-                  __func__, addr, reserved_va);
+                  __func__, addr, reserved_va + 1);
 }
 
 void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
@@ -109,11 +109,9 @@ static const char *last_log_filename;
 # if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
 # if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
      (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/* There are a number of places where we assign reserved_va to a variable
-   of type abi_ulong and expect it to fit. Avoid the last page. */
-# define MAX_RESERVED_VA(CPU) (0xfffffffful & TARGET_PAGE_MASK)
+# define MAX_RESERVED_VA(CPU) 0xfffffffful
 # else
-# define MAX_RESERVED_VA(CPU) (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
+# define MAX_RESERVED_VA(CPU) ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
 # endif
 # else
 # define MAX_RESERVED_VA(CPU) 0
@@ -379,7 +377,9 @@ static void handle_arg_reserved_va(const char *arg)
 {
     char *p;
     int shift = 0;
-    reserved_va = strtoul(arg, &p, 0);
+    unsigned long val;
+
+    val = strtoul(arg, &p, 0);
     switch (*p) {
     case 'k':
     case 'K':
@@ -393,10 +393,10 @@ static void handle_arg_reserved_va(const char *arg)
         break;
     }
     if (shift) {
-        unsigned long unshifted = reserved_va;
+        unsigned long unshifted = val;
         p++;
-        reserved_va <<= shift;
-        if (reserved_va >> shift != unshifted) {
+        val <<= shift;
+        if (val >> shift != unshifted) {
             fprintf(stderr, "Reserved virtual address too big\n");
             exit(EXIT_FAILURE);
         }
@@ -405,6 +405,8 @@ static void handle_arg_reserved_va(const char *arg)
         fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
         exit(EXIT_FAILURE);
     }
+    /* The representation is size - 1, with 0 remaining "default". */
+    reserved_va = val ? val - 1 : 0;
 }
 
 static void handle_arg_singlestep(const char *arg)
@@ -793,7 +795,7 @@ int main(int argc, char **argv, char **envp)
      */
     max_reserved_va = MAX_RESERVED_VA(cpu);
     if (reserved_va != 0) {
-        if (reserved_va % qemu_host_page_size) {
+        if ((reserved_va + 1) % qemu_host_page_size) {
             char *s = size_to_str(qemu_host_page_size);
             fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s);
             g_free(s);
@@ -804,11 +806,8 @@ int main(int argc, char **argv, char **envp)
             exit(EXIT_FAILURE);
         }
     } else if (HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32) {
-        /*
-         * reserved_va must be aligned with the host page size
-         * as it is used with mmap()
-         */
-        reserved_va = max_reserved_va & qemu_host_page_mask;
+        /* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
+        reserved_va = max_reserved_va;
     }
 
     {
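The reworked handle_arg_reserved_va() above parses the -R argument into a local size value and only converts it to the internal last-byte form at the end ("size - 1, with 0 remaining default"). A rough standalone sketch of that convention follows; parse_reserved_va() is a made-up helper, only the 'G' suffix is handled, and the real code's shift-overflow check is omitted:

    /* Sketch of the "size - 1, with 0 meaning default" representation. */
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long parse_reserved_va(const char *arg)
    {
        char *p;
        unsigned long val = strtoul(arg, &p, 0);

        if (*p == 'g' || *p == 'G') {   /* only one suffix in this sketch */
            val <<= 30;
        }
        /* Store the last byte; 0 keeps its "use the default" meaning. */
        return val ? val - 1 : 0;
    }

    int main(void)
    {
        /* "-R 4G" on a 64-bit host: size 0x100000000 -> last byte 0xffffffff. */
        printf("4G      -> reserved_va = 0x%lx\n", parse_reserved_va("4G"));
        printf("0x10000 -> reserved_va = 0x%lx\n", parse_reserved_va("0x10000"));
        return 0;
    }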
@@ -283,7 +283,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
     end_addr = start + size;
     if (start > reserved_va - size) {
         /* Start at the top of the address space. */
-        end_addr = ((reserved_va - size) & -align) + size;
+        end_addr = ((reserved_va + 1 - size) & -align) + size;
         looped = true;
     }
 
@@ -297,7 +297,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                 return (abi_ulong)-1;
             }
             /* Re-start at the top of the address space. */
-            addr = end_addr = ((reserved_va - size) & -align) + size;
+            addr = end_addr = ((reserved_va + 1 - size) & -align) + size;
             looped = true;
         } else {
             prot = page_get_flags(addr);