target/i386: Raise #GP on unaligned m128 accesses when required.
Many instructions which load/store 128-bit values are supposed to raise
#GP when the memory operand isn't 16-byte aligned. This includes:

- Instructions explicitly requiring memory alignment (Exceptions Type 1
  in the "AVX and SSE Instruction Exception Specification" section of
  the SDM)
- Legacy SSE instructions that load/store 128-bit values (Exceptions
  Types 2 and 4).

This change sets MO_ALIGN_16 on 128-bit memory accesses that require
16-byte alignment. It adds cpu_record_sigbus and cpu_do_unaligned_access
hooks that simulate a #GP exception in qemu-user and qemu-system,
respectively.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/217
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Ricky Zhou <ricky@rzhou.org>
Message-Id: <20220830034816.57091-2-ricky@rzhou.org>
[Do not bother checking PREFIX_VEX, since AVX is not supported. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 52281c6d11
commit 958e1dd130
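
How the new hooks get invoked, in outline: a TCG memory op that carries
MO_ALIGN_16 makes QEMU's generic memory helpers check the low four address
bits before the access, and on a mismatch they call into the per-CPU hooks
wired up below. A simplified sketch, not code from this patch
(check_alignment is a made-up name; CPU_GET_CLASS, get_alignment_bits and
the tcg_ops callbacks are existing QEMU interfaces):

    static void check_alignment(CPUState *cs, vaddr addr, MemOp mop,
                                MMUAccessType access_type, int mmu_idx,
                                uintptr_t ra)
    {
        unsigned a_bits = get_alignment_bits(mop);   /* 4 for MO_ALIGN_16 */

        if (addr & ((1 << a_bits) - 1)) {
            CPUClass *cc = CPU_GET_CLASS(cs);
    #ifdef CONFIG_USER_ONLY
            /* qemu-user: the new .record_sigbus hook, which for x86 now
             * simulates #GP instead of a SIGBUS-style exit. */
            cc->tcg_ops->record_sigbus(cs, addr, access_type, ra);
    #else
            /* qemu-system: the new .do_unaligned_access hook. */
            cc->tcg_ops->do_unaligned_access(cs, addr, access_type,
                                             mmu_idx, ra);
    #endif
        }
    }

Both callbacks funnel into handle_unaligned_access(), added below, which
raises EXCP0D_GPF.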
@@ -140,3 +140,16 @@ G_NORETURN void raise_exception_ra(CPUX86State *env, int exception_index,
 {
     raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
 }
+
+G_NORETURN void handle_unaligned_access(CPUX86State *env, vaddr vaddr,
+                                        MMUAccessType access_type,
+                                        uintptr_t retaddr)
+{
+    /*
+     * Unaligned accesses are currently only triggered by SSE/AVX
+     * instructions that impose alignment requirements on memory
+     * operands. These instructions raise #GP(0) upon accessing an
+     * unaligned address.
+     */
+    raise_exception_ra(env, EXCP0D_GPF, retaddr);
+}
@@ -42,17 +42,6 @@ void x86_cpu_do_interrupt(CPUState *cpu);
 bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
 #endif
 
-/* helper.c */
-#ifdef CONFIG_USER_ONLY
-void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
-                            MMUAccessType access_type,
-                            bool maperr, uintptr_t ra);
-#else
-bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                      MMUAccessType access_type, int mmu_idx,
-                      bool probe, uintptr_t retaddr);
-#endif
-
 void breakpoint_handler(CPUState *cs);
 
 /* n must be a constant to be efficient */
@@ -78,6 +67,23 @@ G_NORETURN void raise_exception_err_ra(CPUX86State *env, int exception_index,
                                        int error_code, uintptr_t retaddr);
 G_NORETURN void raise_interrupt(CPUX86State *nenv, int intno, int is_int,
                                 int error_code, int next_eip_addend);
+G_NORETURN void handle_unaligned_access(CPUX86State *env, vaddr vaddr,
+                                        MMUAccessType access_type,
+                                        uintptr_t retaddr);
+#ifdef CONFIG_USER_ONLY
+void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+                            MMUAccessType access_type,
+                            bool maperr, uintptr_t ra);
+void x86_cpu_record_sigbus(CPUState *cs, vaddr addr,
+                           MMUAccessType access_type, uintptr_t ra);
+#else
+bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                      MMUAccessType access_type, int mmu_idx,
+                      bool probe, uintptr_t retaddr);
+G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+                                            MMUAccessType access_type,
+                                            int mmu_idx, uintptr_t retaddr);
+#endif
 
 /* cc_helper.c */
 extern const uint8_t parity_table[256];
@@ -439,3 +439,11 @@ bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
     }
     return true;
 }
+
+G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+                                            MMUAccessType access_type,
+                                            int mmu_idx, uintptr_t retaddr)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    handle_unaligned_access(&cpu->env, vaddr, access_type, retaddr);
+}
@@ -75,10 +75,12 @@ static const struct TCGCPUOps x86_tcg_ops = {
 #ifdef CONFIG_USER_ONLY
     .fake_user_interrupt = x86_cpu_do_interrupt,
     .record_sigsegv = x86_cpu_record_sigsegv,
+    .record_sigbus = x86_cpu_record_sigbus,
 #else
     .tlb_fill = x86_cpu_tlb_fill,
     .do_interrupt = x86_cpu_do_interrupt,
     .cpu_exec_interrupt = x86_cpu_exec_interrupt,
+    .do_unaligned_access = x86_cpu_do_unaligned_access,
     .debug_excp_handler = breakpoint_handler,
     .debug_check_breakpoint = x86_debug_check_breakpoint,
 #endif /* !CONFIG_USER_ONLY */
@@ -2738,21 +2738,23 @@ static inline void gen_stq_env_A0(DisasContext *s, int offset)
     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
 }
 
-static inline void gen_ldo_env_A0(DisasContext *s, int offset)
+static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
 {
     int mem_index = s->mem_index;
-    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEUQ);
+    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
+                        MO_LEUQ | (align ? MO_ALIGN_16 : 0));
     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
 }
 
-static inline void gen_sto_env_A0(DisasContext *s, int offset)
+static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
 {
     int mem_index = s->mem_index;
     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
-    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEUQ);
+    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
+                        MO_LEUQ | (align ? MO_ALIGN_16 : 0));
     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
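
Two details of the hunk above worth noting: only the low-quadword access
carries MO_ALIGN_16, since a 16-byte-aligned base makes the second access
at A0 + 8 aligned by construction (and MO_LEUQ without MO_ALIGN bits
imposes no check); and the new align parameter lets callers distinguish
alignment-checked opcodes (movaps/movapd/movdqa and friends pass true)
from the explicitly unaligned ones (movups/movupd/movdqu and lddqu pass
false). A sketch of the resulting MemOp values, using the patch's names:

    /* First half: checked when align is set; #GP(0) if (A0 & 15) != 0. */
    MemOp first  = MO_LEUQ | (align ? MO_ALIGN_16 : 0);
    /* Second half at A0 + 8: never checked; 8-byte alignment follows
     * from the check on the base address. */
    MemOp second = MO_LEUQ;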
@@ -3131,7 +3133,7 @@ static const struct SSEOpHelper_table6 sse_op_table6[256] = {
     [0x25] = UNARY_OP(pmovsxdq, SSE41, SSE_OPF_MMX),
     [0x28] = BINARY_OP(pmuldq, SSE41, SSE_OPF_MMX),
     [0x29] = BINARY_OP(pcmpeqq, SSE41, SSE_OPF_MMX),
-    [0x2a] = SPECIAL_OP(SSE41), /* movntqda */
+    [0x2a] = SPECIAL_OP(SSE41), /* movntdqa */
     [0x2b] = BINARY_OP(packusdw, SSE41, SSE_OPF_MMX),
     [0x30] = UNARY_OP(pmovzxbw, SSE41, SSE_OPF_MMX),
     [0x31] = UNARY_OP(pmovzxbd, SSE41, SSE_OPF_MMX),
@@ -3294,17 +3296,17 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         break;
     case 0x1e7: /* movntdq */
     case 0x02b: /* movntps */
-    case 0x12b: /* movntps */
+    case 0x12b: /* movntpd */
        if (mod == 3)
             goto illegal_op;
         gen_lea_modrm(env, s, modrm);
-        gen_sto_env_A0(s, ZMM_OFFSET(reg));
+        gen_sto_env_A0(s, ZMM_OFFSET(reg), true);
         break;
     case 0x3f0: /* lddqu */
         if (mod == 3)
             goto illegal_op;
         gen_lea_modrm(env, s, modrm);
-        gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+        gen_ldo_env_A0(s, ZMM_OFFSET(reg), false);
         break;
     case 0x22b: /* movntss */
     case 0x32b: /* movntsd */
@@ -3373,7 +3375,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
     case 0x26f: /* movdqu xmm, ea */
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
-            gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+            gen_ldo_env_A0(s, ZMM_OFFSET(reg),
+                           /* movaps, movapd, movdqa */
+                           b == 0x028 || b == 0x128 || b == 0x16f);
         } else {
             rm = (modrm & 7) | REX_B(s);
             gen_op_movo(s, ZMM_OFFSET(reg), ZMM_OFFSET(rm));
@@ -3432,7 +3436,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
     case 0x212: /* movsldup */
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
-            gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+            gen_ldo_env_A0(s, ZMM_OFFSET(reg), true);
         } else {
             rm = (modrm & 7) | REX_B(s);
             gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)),
@@ -3474,7 +3478,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
     case 0x216: /* movshdup */
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
-            gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+            gen_ldo_env_A0(s, ZMM_OFFSET(reg), true);
         } else {
             rm = (modrm & 7) | REX_B(s);
             gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)),
@@ -3568,7 +3572,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
     case 0x27f: /* movdqu ea, xmm */
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
-            gen_sto_env_A0(s, ZMM_OFFSET(reg));
+            gen_sto_env_A0(s, ZMM_OFFSET(reg),
+                           /* movaps, movapd, movdqa */
+                           b == 0x029 || b == 0x129 || b == 0x17f);
         } else {
             rm = (modrm & 7) | REX_B(s);
             gen_op_movo(s, ZMM_OFFSET(rm), ZMM_OFFSET(reg));
@@ -3724,7 +3730,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
             op2_offset = offsetof(CPUX86State,xmm_t0);
-            gen_ldo_env_A0(s, op2_offset);
+            /* FIXME: should be 64-bit access if b1 == 0. */
+            gen_ldo_env_A0(s, op2_offset, !!b1);
         } else {
             rm = (modrm & 7) | REX_B(s);
             op2_offset = ZMM_OFFSET(rm);
@@ -3913,11 +3920,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                 tcg_gen_st16_tl(s->tmp0, cpu_env, op2_offset +
                                 offsetof(ZMMReg, ZMM_W(0)));
                 break;
-            case 0x2a: /* movntqda */
-                gen_ldo_env_A0(s, op1_offset);
+            case 0x2a: /* movntdqa */
+                gen_ldo_env_A0(s, op1_offset, true);
                 return;
             default:
-                gen_ldo_env_A0(s, op2_offset);
+                gen_ldo_env_A0(s, op2_offset, true);
             }
         }
         if (!op6->fn[b1].op1) {
@@ -4499,7 +4506,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         } else {
             op2_offset = offsetof(CPUX86State, xmm_t0);
             gen_lea_modrm(env, s, modrm);
-            gen_ldo_env_A0(s, op2_offset);
+            gen_ldo_env_A0(s, op2_offset, true);
         }
 
         val = x86_ldub_code(env, s);
@@ -4606,7 +4613,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                 break;
             default:
                 /* 128 bit access */
-                gen_ldo_env_A0(s, op2_offset);
+                gen_ldo_env_A0(s, op2_offset, true);
                 break;
             }
         } else {
@@ -48,3 +48,10 @@ void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
 
     cpu_loop_exit_restore(cs, ra);
 }
+
+void x86_cpu_record_sigbus(CPUState *cs, vaddr addr,
+                           MMUAccessType access_type, uintptr_t ra)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    handle_unaligned_access(&cpu->env, addr, access_type, ra);
+}
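
A quick way to exercise the fix from guest code (a hypothetical test, not
part of this commit): under qemu-user on Linux, the misaligned movaps
below should now raise #GP(0), delivered to the process as SIGSEGV, where
it previously loaded without complaint.

    #include <signal.h>
    #include <unistd.h>

    static char buf[32] __attribute__((aligned(16)));

    static void handler(int sig)
    {
        static const char msg[] = "got #GP for misaligned movaps\n";
        (void)sig;
        write(STDOUT_FILENO, msg, sizeof(msg) - 1);
        _exit(0);
    }

    int main(void)
    {
        signal(SIGSEGV, handler);
        /* movaps requires a 16-byte-aligned memory operand; buf + 1 is not. */
        __asm__ volatile("movaps (%0), %%xmm0" : : "r"(buf + 1) : "xmm0");
        static const char msg[] = "no fault: alignment not enforced\n";
        write(STDOUT_FILENO, msg, sizeof(msg) - 1);
        return 1;
    }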