Merge tag 'pull-tcg-20231212' of https://gitlab.com/rth7680/qemu into staging

target/i386: Fix 32-bit wrapping of pc/eip computation (#2022)
tcg: Reduce serial context atomicity earlier (#2034)

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmV41IEdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV+0DwgApqX4Ntaz1/eIbEmr
# sWTGlG7sQX28JrYm+Bd4MgtlE2+i06Vs3q1ZHThuZs9S6tQf8bcm1q1m0qZ486jk
# hgQqSMPAOJv1U+QhTRy1kW3l8UmZkw9YddfV5FjBHeuRWglVeSxDtqkc4fUffthb
# 82KvYIqo836HsYOOWtJqSuWVi60+q1RqYg+WZuygUmprf8Y+72Zu7ojjrizHoUNQ
# wTjGR8Jsf22ZrFi+B0MXL78oumMLTnjxCv1426+P+0zVclJAJZxS/7K+VhD4cG1q
# FG2zAphly+vuB248XSyzYxM8vgCVNAkLoUb2AAw1pdQpUzNaAEoTcAXIR7PJDord
# wZnmvw==
# =Fsyn
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 12 Dec 2023 16:45:37 EST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20231212' of https://gitlab.com/rth7680/qemu:
  tcg: Reduce serial context atomicity earlier
  target/i386: Fix 32-bit wrapping of pc/eip computation

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 4fd8a95437
Stefan Hajnoczi, 2023-12-12 16:54:28 -05:00
5 changed files with 58 additions and 22 deletions

target/i386/cpu.h

@@ -2324,10 +2324,15 @@ static inline int cpu_mmu_index_kernel(CPUX86State *env)
 static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc,
                                         uint64_t *cs_base, uint32_t *flags)
 {
-    *cs_base = env->segs[R_CS].base;
-    *pc = *cs_base + env->eip;
     *flags = env->hflags |
         (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
+    if (env->hflags & HF_CS64_MASK) {
+        *cs_base = 0;
+        *pc = env->eip;
+    } else {
+        *cs_base = env->segs[R_CS].base;
+        *pc = (uint32_t)(*cs_base + env->eip);
+    }
 }
 
 void do_cpu_init(X86CPU *cpu);
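
Aside (not part of the commit): the cast added above matters because, outside a 64-bit code segment, the linear instruction pointer must wrap at 4 GiB while cs_base + eip is computed in a wider type. A minimal standalone C sketch with invented values:

/* Illustrative sketch only -- not QEMU code; segment base and offset are invented. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t cs_base = 0xffff0000;   /* hypothetical segment base */
    uint64_t eip     = 0x00020000;   /* hypothetical offset within the segment */

    uint64_t unwrapped = cs_base + eip;             /* 0x100010000: spills past 4 GiB */
    uint32_t wrapped   = (uint32_t)(cs_base + eip); /* 0x00010000: wraps like real hardware */

    printf("unwrapped pc: 0x%" PRIx64 "\n", unwrapped);
    printf("wrapped pc:   0x%" PRIx32 "\n", wrapped);
    return 0;
}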

target/i386/tcg/tcg-cpu.c

@@ -52,7 +52,12 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs,
     /* The instruction pointer is always up to date with CF_PCREL. */
     if (!(tb_cflags(tb) & CF_PCREL)) {
         CPUX86State *env = cpu_env(cs);
-        env->eip = tb->pc - tb->cs_base;
+
+        if (tb->flags & HF_CS64_MASK) {
+            env->eip = tb->pc;
+        } else {
+            env->eip = (uint32_t)(tb->pc - tb->cs_base);
+        }
     }
 }
 
@@ -66,8 +71,10 @@ static void x86_restore_state_to_opc(CPUState *cs,
     if (tb_cflags(tb) & CF_PCREL) {
         env->eip = (env->eip & TARGET_PAGE_MASK) | data[0];
+    } else if (tb->flags & HF_CS64_MASK) {
+        env->eip = data[0];
     } else {
-        env->eip = data[0] - tb->cs_base;
+        env->eip = (uint32_t)(data[0] - tb->cs_base);
     }
 
     if (cc_op != CC_OP_DYNAMIC) {
         env->cc_op = cc_op;
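
Aside (not part of the commit): the restore paths above recover eip by subtracting cs_base from the TB pc and truncating again; truncating in both directions round-trips cleanly modulo 2^32. A minimal standalone sketch with invented values:

/* Illustrative sketch only -- not QEMU code; values are invented. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t cs_base = 0xfffff000;  /* hypothetical segment base */
    uint64_t eip     = 0x00002345;  /* hypothetical 32-bit eip */

    /* Forming the TB pc truncates, as in cpu_get_tb_cpu_state(). */
    uint64_t pc = (uint32_t)(cs_base + eip);

    /* Recovering eip truncates again, as in x86_restore_state_to_opc(). */
    uint64_t recovered = (uint32_t)(pc - cs_base);

    assert(recovered == eip);  /* modular arithmetic: the two wrap-arounds cancel */
    return 0;
}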

target/i386/tcg/translate.c

@@ -552,8 +552,10 @@ static void gen_update_eip_cur(DisasContext *s)
     assert(s->pc_save != -1);
     if (tb_cflags(s->base.tb) & CF_PCREL) {
         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
+    } else if (CODE64(s)) {
+        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
     } else {
-        tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
+        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
     }
     s->pc_save = s->base.pc_next;
 }
@@ -563,8 +565,10 @@ static void gen_update_eip_next(DisasContext *s)
     assert(s->pc_save != -1);
     if (tb_cflags(s->base.tb) & CF_PCREL) {
         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
+    } else if (CODE64(s)) {
+        tcg_gen_movi_tl(cpu_eip, s->pc);
     } else {
-        tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
+        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
     }
     s->pc_save = s->pc;
 }
@@ -610,8 +614,10 @@ static TCGv eip_next_tl(DisasContext *s)
         TCGv ret = tcg_temp_new();
         tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
         return ret;
+    } else if (CODE64(s)) {
+        return tcg_constant_tl(s->pc);
     } else {
-        return tcg_constant_tl(s->pc - s->cs_base);
+        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
     }
 }
@@ -622,8 +628,10 @@ static TCGv eip_cur_tl(DisasContext *s)
         TCGv ret = tcg_temp_new();
         tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
         return ret;
+    } else if (CODE64(s)) {
+        return tcg_constant_tl(s->base.pc_next);
     } else {
-        return tcg_constant_tl(s->base.pc_next - s->cs_base);
+        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
     }
 }
@@ -2837,6 +2845,10 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
         }
     }
     new_eip &= mask;
+    new_pc = new_eip + s->cs_base;
+    if (!CODE64(s)) {
+        new_pc = (uint32_t)new_pc;
+    }
 
     gen_update_cc_op(s);
     set_cc_op(s, CC_OP_DYNAMIC);
@@ -2854,8 +2866,7 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
         }
     }
 
-    if (use_goto_tb &&
-        translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
+    if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
         /* jump to same page: we can use a direct jump */
         tcg_gen_goto_tb(tb_num);
         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
tcg/tcg-op-ldst.c

@@ -77,6 +77,13 @@ static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
     if (st) {
         op &= ~MO_SIGN;
     }
+
+    /* In serial mode, reduce atomicity. */
+    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
+        op &= ~MO_ATOM_MASK;
+        op |= MO_ATOM_NONE;
+    }
+
     return op;
 }
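
Aside (not part of the commit): the two added statements are a plain read-modify-write of the atomicity bit field packed into MemOp, leaving the size, sign and alignment bits alone. A minimal standalone sketch of the same pattern, using invented constants rather than QEMU's real MemOp encoding:

/* Illustrative sketch only -- the constants are invented, not QEMU's MemOp layout. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t memop_t;

enum {
    SK_SIZE_MASK     = 0x3,        /* hypothetical size field            */
    SK_ATOM_SHIFT    = 4,
    SK_ATOM_MASK     = 0x7 << SK_ATOM_SHIFT,
    SK_ATOM_NONE     = 0x0 << SK_ATOM_SHIFT,
    SK_ATOM_WITHIN16 = 0x3 << SK_ATOM_SHIFT,
};

/* Same shape as the hunk above: drop any atomicity request when not parallel. */
static memop_t reduce_atomicity(memop_t op, bool parallel)
{
    if (!parallel) {
        op &= ~SK_ATOM_MASK;   /* clear the atomicity field       */
        op |= SK_ATOM_NONE;    /* select "no atomicity required"  */
    }
    return op;
}

int main(void)
{
    memop_t op = 0x3 | SK_ATOM_WITHIN16;        /* 8-byte access, within-16 atomic */
    memop_t serial = reduce_atomicity(op, false);

    assert((serial & SK_ATOM_MASK) == SK_ATOM_NONE);        /* atomicity dropped   */
    assert((serial & SK_SIZE_MASK) == (op & SK_SIZE_MASK)); /* size bits untouched */
    return 0;
}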
@@ -428,8 +435,7 @@ static bool use_two_i64_for_i128(MemOp mop)
     case MO_ATOM_SUBALIGN:
     case MO_ATOM_WITHIN16:
     case MO_ATOM_WITHIN16_PAIR:
-        /* In a serialized context, no atomicity is required. */
-        return !(tcg_ctx->gen_tb->cflags & CF_PARALLEL);
+        return false;
     default:
         g_assert_not_reached();
     }
@@ -499,13 +505,20 @@ static void maybe_free_addr64(TCGv_i64 a64)
 static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
                                      TCGArg idx, MemOp memop)
 {
-    const MemOpIdx orig_oi = make_memop_idx(memop, idx);
+    MemOpIdx orig_oi;
     TCGv_i64 ext_addr = NULL;
     TCGOpcode opc;
 
     check_max_alignment(get_alignment_bits(memop));
     tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
 
+    /* In serial mode, reduce atomicity. */
+    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
+        memop &= ~MO_ATOM_MASK;
+        memop |= MO_ATOM_NONE;
+    }
+    orig_oi = make_memop_idx(memop, idx);
+
     /* TODO: For now, force 32-bit hosts to use the helper. */
     if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
         TCGv_i64 lo, hi;
@@ -608,13 +621,20 @@ void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
 static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
                                      TCGArg idx, MemOp memop)
 {
-    const MemOpIdx orig_oi = make_memop_idx(memop, idx);
+    MemOpIdx orig_oi;
     TCGv_i64 ext_addr = NULL;
     TCGOpcode opc;
 
     check_max_alignment(get_alignment_bits(memop));
     tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
 
+    /* In serial mode, reduce atomicity. */
+    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
+        memop &= ~MO_ATOM_MASK;
+        memop |= MO_ATOM_NONE;
+    }
+    orig_oi = make_memop_idx(memop, idx);
+
     /* TODO: For now, force 32-bit hosts to use the helper. */
     if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {

tcg/tcg.c

@@ -5440,15 +5440,8 @@ static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
     MemOp align = get_alignment_bits(opc);
     MemOp size = opc & MO_SIZE;
     MemOp half = size ? size - 1 : 0;
+    MemOp atom = opc & MO_ATOM_MASK;
     MemOp atmax;
-    MemOp atom;
-
-    /* When serialized, no further atomicity required. */
-    if (s->gen_tb->cflags & CF_PARALLEL) {
-        atom = opc & MO_ATOM_MASK;
-    } else {
-        atom = MO_ATOM_NONE;
-    }
 
     switch (atom) {
     case MO_ATOM_NONE: