target/i386: fix cmpxchg with 32-bit register destination

Unlike the memory case, where "the destination operand receives a write
cycle without regard to the result of the comparison", rm must not be
touched at all if the comparison fails, not even to zero-extend it on
64-bit processors.  This is not how the movcond currently works, because
it is always followed by a gen_op_mov_reg_v to rm.
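
[Editor's note, not part of the commit message: the architectural rule can
be stated as a short C model.  Illustrative only; the helper name is made
up.]

    #include <assert.h>
    #include <stdint.h>

    /* Model of CMPXCHG r/m32, r32 with a register destination (64-bit mode). */
    static void cmpxchg32_reg(uint64_t *rax, uint64_t *rm, uint32_t src)
    {
        if ((uint32_t)*rm == (uint32_t)*rax) {
            *rm = src;                /* a successful write zero-extends rm */
        } else {
            *rax = (uint32_t)*rm;     /* on failure only the accumulator is
                                         written; rm keeps its high bits */
        }
    }

    int main(void)
    {
        uint64_t rax = 0, rm = 0xdeadbeef12345678ull;
        cmpxchg32_reg(&rax, &rm, 0x77777777u);   /* 0 != 0x12345678: fails */
        assert(rm == 0xdeadbeef12345678ull);     /* untouched, not zero-extended */
        assert(rax == 0x12345678ull);            /* old value, zero-extended */
        return 0;
    }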

To fix it, introduce a new function that is similar to gen_op_mov_reg_v
but can write the result to a TCG temporary instead of directly to the
register.

Considering that gen_extu(ot, oldv) is not needed in the memory case
either (the value returned by the atomic helper is already zero-extended
to the operand size), the register and memory destination cases are
different enough that one might as well fuse the two "if (mod == 3)"
conditionals into one.  So do that too.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/508
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[rth: Add a test case]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Paolo Bonzini, 2022-09-11 14:04:36 +02:00; committed by Richard Henderson
parent 98f10f0e26
commit d1bb978ba1
3 changed files with 99 additions and 26 deletions

target/i386/tcg/translate.c

@@ -439,32 +439,51 @@ static inline MemOp mo_b_d32(int b, MemOp ot)
     return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
 }
 
-static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
+/* Compute the result of writing t0 to the OT-sized register REG.
+ *
+ * If DEST is NULL, store the result into the register and return the
+ * register's TCGv.
+ *
+ * If DEST is not NULL, store the result into DEST and return the
+ * register's TCGv.
+ */
+static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
 {
     switch(ot) {
     case MO_8:
-        if (!byte_reg_is_xH(s, reg)) {
-            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
-        } else {
-            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
+        if (byte_reg_is_xH(s, reg)) {
+            dest = dest ? dest : cpu_regs[reg - 4];
+            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
+            return cpu_regs[reg - 4];
         }
+        dest = dest ? dest : cpu_regs[reg];
+        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
         break;
     case MO_16:
-        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
+        dest = dest ? dest : cpu_regs[reg];
+        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
         break;
     case MO_32:
         /* For x86_64, this sets the higher half of register to zero.
            For i386, this is equivalent to a mov. */
-        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
+        dest = dest ? dest : cpu_regs[reg];
+        tcg_gen_ext32u_tl(dest, t0);
         break;
 #ifdef TARGET_X86_64
     case MO_64:
-        tcg_gen_mov_tl(cpu_regs[reg], t0);
+        dest = dest ? dest : cpu_regs[reg];
+        tcg_gen_mov_tl(dest, t0);
         break;
 #endif
     default:
         tcg_abort();
     }
+    return cpu_regs[reg];
+}
+
+static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
+{
+    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
 }
 
 static inline
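
[Editor's note, not part of the diff: the helper introduced above supports
two calling patterns, both of which appear later in this commit.]

    /* dest == NULL: behave like the old gen_op_mov_reg_v() and write the
     * merged value straight into the architectural register. */
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);

    /* dest != NULL: compute the would-be register contents into a caller-
     * supplied temporary and return the register's own TCGv, so that a
     * later movcond can decide whether to commit the write. */
    dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
    tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);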
@@ -3747,7 +3766,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
     case 0x1b0:
     case 0x1b1: /* cmpxchg Ev, Gv */
         {
-            TCGv oldv, newv, cmpv;
+            TCGv oldv, newv, cmpv, dest;
 
             ot = mo_b_d(b, dflag);
             modrm = x86_ldub_code(env, s);
@@ -3758,7 +3777,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
             cmpv = tcg_temp_new();
             gen_op_mov_v_reg(s, ot, newv, reg);
             tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
-
+            gen_extu(ot, cmpv);
             if (s->prefix & PREFIX_LOCK) {
                 if (mod == 3) {
                     goto illegal_op;
@@ -3766,32 +3785,43 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
                 gen_lea_modrm(env, s, modrm);
                 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
                                           s->mem_index, ot | MO_LE);
-                gen_op_mov_reg_v(s, ot, R_EAX, oldv);
             } else {
                 if (mod == 3) {
                     rm = (modrm & 7) | REX_B(s);
                     gen_op_mov_v_reg(s, ot, oldv, rm);
+                    gen_extu(ot, oldv);
+
+                    /*
+                     * Unlike the memory case, where "the destination operand receives
+                     * a write cycle without regard to the result of the comparison",
+                     * rm must not be touched altogether if the write fails, including
+                     * not zero-extending it on 64-bit processors.  So, precompute
+                     * the result of a successful writeback and perform the movcond
+                     * directly on cpu_regs.  Also need to write accumulator first, in
+                     * case rm is part of RAX too.
+                     */
+                    dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
+                    tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
                 } else {
                     gen_lea_modrm(env, s, modrm);
                     gen_op_ld_v(s, ot, oldv, s->A0);
-                    rm = 0; /* avoid warning */
-                }
-                gen_extu(ot, oldv);
-                gen_extu(ot, cmpv);
-                /* store value = (old == cmp ? new : old); */
-                tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
-                if (mod == 3) {
-                    gen_op_mov_reg_v(s, ot, R_EAX, oldv);
-                    gen_op_mov_reg_v(s, ot, rm, newv);
-                } else {
-                    /* Perform an unconditional store cycle like physical cpu;
-                       must be before changing accumulator to ensure
-                       idempotency if the store faults and the instruction
-                       is restarted */
+
+                    /*
+                     * Perform an unconditional store cycle like physical cpu;
+                     * must be before changing accumulator to ensure
+                     * idempotency if the store faults and the instruction
+                     * is restarted
+                     */
+                    tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
                     gen_op_st_v(s, ot, newv, s->A0);
-                    gen_op_mov_reg_v(s, ot, R_EAX, oldv);
                 }
             }
+            /*
+             * Write EAX only if the cmpxchg fails; reuse newv as the destination,
+             * since it's dead here.
+             */
+            dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
+            tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
             tcg_gen_mov_tl(cpu_cc_src, oldv);
             tcg_gen_mov_tl(s->cc_srcT, cmpv);
             tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);

tests/tcg/x86_64/Makefile.target

@@ -11,6 +11,7 @@ include $(SRC_PATH)/tests/tcg/i386/Makefile.target
 ifeq ($(filter %-linux-user, $(TARGET)),$(TARGET))
 X86_64_TESTS += vsyscall
 X86_64_TESTS += noexec
+X86_64_TESTS += cmpxchg
 TESTS=$(MULTIARCH_TESTS) $(X86_64_TESTS) test-x86_64
 else
 TESTS=$(MULTIARCH_TESTS)

tests/tcg/x86_64/cmpxchg.c

@@ -0,0 +1,42 @@
+#include <assert.h>
+
+static int mem;
+
+static unsigned long test_cmpxchgb(unsigned long orig)
+{
+    unsigned long ret;
+    mem = orig;
+    asm("cmpxchgb %b[cmp],%[mem]"
+        : [ mem ] "+m"(mem), [ rax ] "=a"(ret)
+        : [ cmp ] "r"(0x77), "a"(orig));
+    return ret;
+}
+
+static unsigned long test_cmpxchgw(unsigned long orig)
+{
+    unsigned long ret;
+    mem = orig;
+    asm("cmpxchgw %w[cmp],%[mem]"
+        : [ mem ] "+m"(mem), [ rax ] "=a"(ret)
+        : [ cmp ] "r"(0x7777), "a"(orig));
+    return ret;
+}
+
+static unsigned long test_cmpxchgl(unsigned long orig)
+{
+    unsigned long ret;
+    mem = orig;
+    asm("cmpxchgl %[cmp],%[mem]"
+        : [ mem ] "+m"(mem), [ rax ] "=a"(ret)
+        : [ cmp ] "r"(0x77777777u), "a"(orig));
+    return ret;
+}
+
+int main()
+{
+    unsigned long test = 0xdeadbeef12345678ull;
+    assert(test == test_cmpxchgb(test));
+    assert(test == test_cmpxchgw(test));
+    assert(test == test_cmpxchgl(test));
+    return 0;
+}
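
[Editor's note: the new test exercises the accumulator writeback through
memory destinations.  A companion test for the register-destination path
that motivated the fix might look like the sketch below; test_cmpxchgl_reg
is a hypothetical addition, not part of the commit.]

    /* A failing 32-bit cmpxchg with a register destination must leave the
     * destination register completely untouched, high 32 bits included. */
    static unsigned long test_cmpxchgl_reg(unsigned long orig)
    {
        unsigned long dst = orig;
        unsigned long acc = 0;             /* 0 != low 32 bits of the test
                                              value above, so the compare fails */

        asm("cmpxchgl %k[cmp],%k[dst]"
            : [ dst ] "+r"(dst), "+a"(acc)
            : [ cmp ] "r"(0x77777777u));
        assert(dst == orig);               /* no write cycle, no zero-extension */
        assert(acc == (unsigned int)orig); /* accumulator receives old value */
        return dst;
    }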