linux-user: remove useless macros GUEST_BASE and RESERVED_VA

As we have removed CONFIG_USE_GUEST_BASE, we always use a guest base,
and the macros GUEST_BASE and RESERVED_VA become useless: replace them
with their values.
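
For context, the mechanism itself is unchanged: in user-mode emulation a
guest virtual address becomes a host pointer by adding guest_base, and a
non-zero reserved_va bounds the guest address space. A minimal sketch of
the pattern after this change (illustrative only, simplified from the real
definitions in the headers touched below):

    extern unsigned long guest_base;   /* host offset of the guest address space */
    extern unsigned long reserved_va;  /* size of the reserved guest VA region, or 0 */

    /* guest virtual address -> host pointer: just add the base */
    #define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + guest_base))

    /* host pointer -> guest address, without a range check */
    #define h2g_nocheck(x) ((abi_ulong)((unsigned long)(x) - guest_base))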

Reviewed-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Laurent Vivier <laurent@vivier.eu>
Message-Id: <1440420834-8388-1-git-send-email-laurent@vivier.eu>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Laurent Vivier 2015-08-24 14:53:54 +02:00 committed by Richard Henderson
parent 4cbea59869
commit b76f21a707
11 changed files with 64 additions and 78 deletions

@@ -163,10 +163,8 @@ static inline void tswap64s(uint64_t *s)
 extern unsigned long guest_base;
 extern int have_guest_base;
 extern unsigned long reserved_va;
-#define GUEST_BASE guest_base
-#define RESERVED_VA reserved_va
-#define GUEST_ADDR_MAX (RESERVED_VA ? RESERVED_VA : \
+#define GUEST_ADDR_MAX (reserved_va ? reserved_va : \
                         (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
 #endif

@@ -49,20 +49,20 @@
 #if defined(CONFIG_USER_ONLY)
 /* All direct uses of g2h and h2g need to go away for usermode softmmu. */
-#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
+#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + guest_base))
 #if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
 #define h2g_valid(x) 1
 #else
 #define h2g_valid(x) ({ \
-    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
+    unsigned long __guest = (unsigned long)(x) - guest_base; \
     (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
-    (!RESERVED_VA || (__guest < RESERVED_VA)); \
+    (!reserved_va || (__guest < reserved_va)); \
 })
 #endif
 #define h2g_nocheck(x) ({ \
-    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
+    unsigned long __ret = (unsigned long)(x) - guest_base; \
     (abi_ulong)__ret; \
 })

@@ -215,14 +215,14 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
     int prot;
     int looped = 0;
-    if (size > RESERVED_VA) {
+    if (size > reserved_va) {
         return (abi_ulong)-1;
     }
     size = HOST_PAGE_ALIGN(size);
     end_addr = start + size;
-    if (end_addr > RESERVED_VA) {
-        end_addr = RESERVED_VA;
+    if (end_addr > reserved_va) {
+        end_addr = reserved_va;
     }
     addr = end_addr - qemu_host_page_size;
@@ -231,7 +231,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
         if (looped) {
             return (abi_ulong)-1;
         }
-        end_addr = RESERVED_VA;
+        end_addr = reserved_va;
         addr = end_addr - qemu_host_page_size;
         looped = 1;
         continue;
@@ -274,7 +274,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
     size = HOST_PAGE_ALIGN(size);
-    if (RESERVED_VA) {
+    if (reserved_va) {
         return mmap_find_vma_reserved(start, size);
     }
@@ -667,7 +667,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
     ret = 0;
     /* unmap what we can */
     if (real_start < real_end) {
-        if (RESERVED_VA) {
+        if (reserved_va) {
             mmap_reserve(real_start, real_end - real_start);
         } else {
             ret = munmap(g2h(real_start), real_end - real_start);
@@ -697,7 +697,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                                    flags,
                                    g2h(new_addr));
-        if (RESERVED_VA && host_addr != MAP_FAILED) {
+        if (reserved_va && host_addr != MAP_FAILED) {
             /* If new and old addresses overlap then the above mremap will
                already have failed with EINVAL. */
             mmap_reserve(old_addr, old_size);
@@ -715,13 +715,13 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                                          old_size, new_size,
                                          flags | MREMAP_FIXED,
                                          g2h(mmap_start));
-            if ( RESERVED_VA ) {
+            if (reserved_va) {
                 mmap_reserve(old_addr, old_size);
             }
         }
     } else {
         int prot = 0;
-        if (RESERVED_VA && old_size < new_size) {
+        if (reserved_va && old_size < new_size) {
             abi_ulong addr;
             for (addr = old_addr + old_size;
                  addr < old_addr + new_size;
@@ -731,7 +731,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
         }
         if (prot == 0) {
             host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
-            if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) {
+            if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
                 mmap_reserve(old_addr + old_size, new_size - old_size);
             }
         } else {

@@ -30,7 +30,7 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 static const int tcg_target_reg_alloc_order[] = {
     TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
     TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
-    TCG_REG_X28, /* we will reserve this for GUEST_BASE if configured */
+    TCG_REG_X28, /* we will reserve this for guest_base if configured */
     TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11,
     TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,
@@ -1225,7 +1225,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                         s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
-                           GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR,
+                           guest_base ? TCG_REG_GUEST_BASE : TCG_REG_XZR,
                            otype, addr_reg);
 #endif /* CONFIG_SOFTMMU */
 }
@@ -1246,7 +1246,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                         data_reg, addr_reg, s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     tcg_out_qemu_st_direct(s, memop, data_reg,
-                           GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR,
+                           guest_base ? TCG_REG_GUEST_BASE : TCG_REG_XZR,
                            otype, addr_reg);
 #endif /* CONFIG_SOFTMMU */
 }
@@ -1806,8 +1806,8 @@ static void tcg_target_qemu_prologue(TCGContext *s)
                   CPU_TEMP_BUF_NLONGS * sizeof(long));
 #if !defined(CONFIG_SOFTMMU)
-    if (GUEST_BASE) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, GUEST_BASE);
+    if (guest_base) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
         tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
     }
 #endif

@@ -1493,8 +1493,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
     add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                         s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
-    if (GUEST_BASE) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE);
+    if (guest_base) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
         tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
     } else {
         tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
@@ -1623,8 +1623,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
     add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                         s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
-    if (GUEST_BASE) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE);
+    if (guest_base) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
         tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
                               datahi, addrlo, TCG_REG_TMP);
     } else {

@@ -1432,7 +1432,7 @@ int arch_prctl(int code, unsigned long addr);
 static int guest_base_flags;
 static inline void setup_guest_base_seg(void)
 {
-    if (arch_prctl(ARCH_SET_GS, GUEST_BASE) == 0) {
+    if (arch_prctl(ARCH_SET_GS, guest_base) == 0) {
         guest_base_flags = P_GS;
     }
 }
@@ -1577,7 +1577,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
                         s->code_ptr, label_ptr);
 #else
     {
-        int32_t offset = GUEST_BASE;
+        int32_t offset = guest_base;
         TCGReg base = addrlo;
         int index = -1;
         int seg = 0;
@@ -1586,7 +1586,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
            We can do this with the ADDR32 prefix if we're not using
           a guest base, or when using segmentation. Otherwise we
           need to zero-extend manually. */
-        if (GUEST_BASE == 0 || guest_base_flags) {
+        if (guest_base == 0 || guest_base_flags) {
             seg = guest_base_flags;
             offset = 0;
             if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
@@ -1597,8 +1597,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
                 tcg_out_ext32u(s, TCG_REG_L0, base);
                 base = TCG_REG_L0;
             }
-            if (offset != GUEST_BASE) {
-                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
+            if (offset != guest_base) {
+                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base);
                 index = TCG_REG_L1;
                 offset = 0;
             }
@@ -1717,12 +1717,12 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
                         s->code_ptr, label_ptr);
 #else
     {
-        int32_t offset = GUEST_BASE;
+        int32_t offset = guest_base;
         TCGReg base = addrlo;
         int seg = 0;
         /* See comment in tcg_out_qemu_ld re zero-extension of addrlo. */
-        if (GUEST_BASE == 0 || guest_base_flags) {
+        if (guest_base == 0 || guest_base_flags) {
             seg = guest_base_flags;
             offset = 0;
             if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
@@ -1731,12 +1731,12 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
         } else if (TCG_TARGET_REG_BITS == 64) {
             /* ??? Note that we can't use the same SIB addressing scheme
                as for loads, since we require L0 free for bswap. */
-            if (offset != GUEST_BASE) {
+            if (offset != guest_base) {
                 if (TARGET_LONG_BITS == 32) {
                     tcg_out_ext32u(s, TCG_REG_L0, base);
                     base = TCG_REG_L0;
                 }
-                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
+                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base);
                 tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
                 base = TCG_REG_L1;
                 offset = 0;
@@ -2315,8 +2315,8 @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_opc(s, OPC_RET, 0, 0, 0);
 #if !defined(CONFIG_SOFTMMU)
-    /* Try to set up a segment register to point to GUEST_BASE. */
-    if (GUEST_BASE) {
+    /* Try to set up a segment register to point to guest_base. */
+    if (guest_base) {
         setup_guest_base_seg();
     }
 #endif

@@ -43,9 +43,6 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 #ifndef CONFIG_SOFTMMU
 #define TCG_GUEST_BASE_REG TCG_REG_R55
 #endif
-#ifndef GUEST_BASE
-#define GUEST_BASE 0
-#endif
 /* Branch registers */
 enum {
@@ -1763,7 +1760,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
     bswap = opc & MO_BSWAP;
 #if TARGET_LONG_BITS == 32
-    if (GUEST_BASE != 0) {
+    if (guest_base != 0) {
         tcg_out_bundle(s, mII,
                        INSN_NOP_M,
                        tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
@@ -1827,7 +1824,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
         }
     }
 #else
-    if (GUEST_BASE != 0) {
+    if (guest_base != 0) {
         tcg_out_bundle(s, MmI,
                        tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
                                    TCG_GUEST_BASE_REG, addr_reg),
@@ -1887,7 +1884,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
     bswap = opc & MO_BSWAP;
 #if TARGET_LONG_BITS == 32
-    if (GUEST_BASE != 0) {
+    if (guest_base != 0) {
         tcg_out_bundle(s, mII,
                        INSN_NOP_M,
                        tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
@@ -1933,7 +1930,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
                        INSN_NOP_M,
                        INSN_NOP_I);
 #else
-    if (GUEST_BASE != 0) {
+    if (guest_base != 0) {
         add_guest_base = tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
                                      TCG_GUEST_BASE_REG, addr_reg);
         addr_reg = TCG_REG_R2;
@@ -1942,7 +1939,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
     }
     if (!bswap) {
-        tcg_out_bundle(s, (GUEST_BASE ? MmI : mmI),
+        tcg_out_bundle(s, (guest_base ? MmI : mmI),
                        add_guest_base,
                        tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
                                    data_reg, addr_reg),
@@ -2351,14 +2348,14 @@ static void tcg_target_qemu_prologue(TCGContext *s)
                    tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
                                TCG_REG_B6, TCG_REG_R33, 0));
-    /* ??? If GUEST_BASE < 0x200000, we could load the register via
+    /* ??? If guest_base < 0x200000, we could load the register via
        an ADDL in the M slot of the next bundle. */
-    if (GUEST_BASE != 0) {
+    if (guest_base != 0) {
         tcg_out_bundle(s, mlx,
                        INSN_NOP_M,
-                       tcg_opc_l2 (GUEST_BASE),
+                       tcg_opc_l2(guest_base),
                        tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2,
-                                   TCG_GUEST_BASE_REG, GUEST_BASE));
+                                   TCG_GUEST_BASE_REG, guest_base));
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }

@@ -1180,12 +1180,12 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
     add_qemu_ldst_label(s, 1, oi, data_regl, data_regh, addr_regl, addr_regh,
                         s->code_ptr, label_ptr);
 #else
-    if (GUEST_BASE == 0 && data_regl != addr_regl) {
+    if (guest_base == 0 && data_regl != addr_regl) {
         base = addr_regl;
-    } else if (GUEST_BASE == (int16_t)GUEST_BASE) {
-        tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE);
+    } else if (guest_base == (int16_t)guest_base) {
+        tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base);
     } else {
-        tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE);
+        tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
         tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
     }
     tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
@@ -1314,14 +1314,14 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
     add_qemu_ldst_label(s, 0, oi, data_regl, data_regh, addr_regl, addr_regh,
                         s->code_ptr, label_ptr);
 #else
-    if (GUEST_BASE == 0) {
+    if (guest_base == 0) {
         base = addr_regl;
     } else {
         base = TCG_REG_A0;
-        if (GUEST_BASE == (int16_t)GUEST_BASE) {
-            tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE);
+        if (guest_base == (int16_t)guest_base) {
+            tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base);
         } else {
-            tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE);
+            tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
             tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
         }
     }

@@ -80,10 +80,6 @@
 static tcg_insn_unit *tb_ret_addr;
-#ifndef GUEST_BASE
-#define GUEST_BASE 0
-#endif
 #include "elf.h"
 static bool have_isa_2_06;
 #define HAVE_ISA_2_06 have_isa_2_06
@@ -1619,7 +1615,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
     rbase = TCG_REG_R3;
 #else /* !CONFIG_SOFTMMU */
-    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
+    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
     if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
         tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
         addrlo = TCG_REG_TMP1;
@@ -1694,7 +1690,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
     rbase = TCG_REG_R3;
 #else /* !CONFIG_SOFTMMU */
-    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
+    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
     if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
         tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
         addrlo = TCG_REG_TMP1;
@@ -1799,8 +1795,8 @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
 #ifndef CONFIG_SOFTMMU
-    if (GUEST_BASE) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
+    if (guest_base) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }
 #endif

@@ -57,11 +57,6 @@
 #define TCG_GUEST_BASE_REG TCG_REG_R0
 #endif
-#ifndef GUEST_BASE
-#define GUEST_BASE 0
-#endif
 /* All of the following instructions are prefixed with their instruction
    format, and are defined as 8- or 16-bit quantities, even when the two
    halves of the 16-bit quantity may appear 32 bits apart in the insn.
@@ -1638,9 +1633,9 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
         tgen_ext32u(s, TCG_TMP0, *addr_reg);
         *addr_reg = TCG_TMP0;
     }
-    if (GUEST_BASE < 0x80000) {
+    if (guest_base < 0x80000) {
         *index_reg = TCG_REG_NONE;
-        *disp = GUEST_BASE;
+        *disp = guest_base;
     } else {
         *index_reg = TCG_GUEST_BASE_REG;
         *disp = 0;
@@ -2349,8 +2344,8 @@ static void tcg_target_qemu_prologue(TCGContext *s)
                   TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                   CPU_TEMP_BUF_NLONGS * sizeof(long));
-    if (GUEST_BASE >= 0x80000) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
+    if (guest_base >= 0x80000) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }

@@ -954,8 +954,8 @@ static void tcg_target_qemu_prologue(TCGContext *s)
                  INSN_IMM13(-frame_size));
 #ifndef CONFIG_SOFTMMU
-    if (GUEST_BASE != 0) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
+    if (guest_base != 0) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }
 #endif
@@ -1144,7 +1144,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
         addr = TCG_REG_T1;
     }
     tcg_out_ldst_rr(s, data, addr,
-                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
+                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                     qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
 #endif /* CONFIG_SOFTMMU */
 }
@@ -1199,7 +1199,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
         addr = TCG_REG_T1;
     }
     tcg_out_ldst_rr(s, data, addr,
-                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
+                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                     qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
 #endif /* CONFIG_SOFTMMU */
 }