target-i386: introduce CCPrepare

Introduce a struct that describes how to build a *cond operation
that checks for a given x86 condition code.  For now, just change
gen_compute_eflags_* to return the new struct, generate code for
the CCPrepare struct, and go on as before.

[rth: Use ctz with the proper width rather than ffs.]

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Richard Henderson 2013-01-23 14:21:52 -08:00
parent c365395e9b
commit bec93d7283
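
A short, self-contained sketch of the idea before the diff (illustrative only: the names CCPrepareModel and eval() are made up for this note, and plain integers stand in for the TCGv temporaries the real struct holds).  A CCPrepare describes the condition "(reg & mask) cond imm", or "reg cond reg2" when use_reg2 is set, without emitting any TCG ops; the caller decides later whether to turn it into a setcond, a branch, or something cheaper.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef enum { COND_NEVER, COND_EQ, COND_NE, COND_LTU, COND_LEU } Cond;

    typedef struct {
        Cond cond;
        uint64_t reg;       /* stands in for the TCGv source operand */
        uint64_t reg2;      /* second operand, only meaningful if use_reg2 */
        uint64_t imm;       /* immediate operand, used when !use_reg2 */
        uint64_t mask;      /* bits of reg that matter; -1 means "all" */
        bool use_reg2;
    } CCPrepareModel;

    static bool eval(CCPrepareModel cc)
    {
        uint64_t lhs = cc.reg & cc.mask;    /* the mask applies to reg only */
        uint64_t rhs = cc.use_reg2 ? cc.reg2 : cc.imm;

        switch (cc.cond) {
        case COND_NEVER: return false;
        case COND_EQ:    return lhs == rhs;
        case COND_NE:    return lhs != rhs;
        case COND_LTU:   return lhs < rhs;  /* unsigned compares */
        case COND_LEU:   return lhs <= rhs;
        }
        return false;
    }

    int main(void)
    {
        /* eflags.Z under CC_OP_EFLAGS: "ZF set" <=> (cc_src & CC_Z) != 0,
           i.e. { .cond = TCG_COND_NE, .reg = cpu_cc_src, .mask = CC_Z }.  */
        uint64_t cc_src = 0x40;                          /* CC_Z == 0x40 */
        CCPrepareModel zf = { COND_NE, cc_src, 0, 0, 0x40, false };
        printf("ZF = %d\n", eval(zf));                   /* prints ZF = 1 */
        return 0;
    }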


@@ -23,6 +23,7 @@
#include <inttypes.h>
#include <signal.h>
#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
@@ -47,6 +48,14 @@
#define REX_B(s) 0
#endif
#ifdef TARGET_X86_64
# define ctztl ctz64
# define clztl clz64
#else
# define ctztl ctz32
# define clztl clz32
#endif
//#define MACRO_TEST 1
/* global register indexes */
@@ -881,11 +890,21 @@ static void gen_compute_eflags(DisasContext *s)
tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
}
typedef struct CCPrepare {
TCGCond cond;
TCGv reg;
TCGv reg2;
target_ulong imm;
target_ulong mask;
bool use_reg2;
bool no_setcond;
} CCPrepare;
/* compute eflags.C to reg */
static void gen_compute_eflags_c(DisasContext *s, TCGv reg, bool inv)
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
TCGv t0, t1;
int size;
int size, shift;
switch (s->cc_op) {
case CC_OP_SUBB ... CC_OP_SUBQ:
@@ -904,9 +923,8 @@ static void gen_compute_eflags_c(DisasContext *s, TCGv reg, bool inv)
t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
add_sub:
tcg_gen_setcond_tl(inv ? TCG_COND_GEU : TCG_COND_LTU, reg, t0, t1);
inv = false;
break;
return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
.reg2 = t1, .mask = -1, .use_reg2 = true };
case CC_OP_SBBB ... CC_OP_SBBQ:
/* (DATA_TYPE)(CC_DST + CC_SRC + 1) <= (DATA_TYPE)CC_SRC */
@@ -929,42 +947,33 @@ static void gen_compute_eflags_c(DisasContext *s, TCGv reg, bool inv)
t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
adc_sbb:
tcg_gen_setcond_tl(inv ? TCG_COND_GTU : TCG_COND_LEU, reg, t0, t1);
inv = false;
break;
return (CCPrepare) { .cond = TCG_COND_LEU, .reg = t0,
.reg2 = t1, .mask = -1, .use_reg2 = true };
case CC_OP_LOGICB ... CC_OP_LOGICQ:
tcg_gen_movi_tl(reg, 0);
break;
return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
case CC_OP_INCB ... CC_OP_INCQ:
case CC_OP_DECB ... CC_OP_DECQ:
if (inv) {
tcg_gen_xori_tl(reg, cpu_cc_src, 1);
} else {
tcg_gen_mov_tl(reg, cpu_cc_src);
}
inv = false;
break;
return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
.mask = -1, .no_setcond = true };
case CC_OP_SHLB ... CC_OP_SHLQ:
/* (CC_SRC >> (DATA_BITS - 1)) & 1 */
size = s->cc_op - CC_OP_SHLB;
tcg_gen_shri_tl(reg, cpu_cc_src, (8 << size) - 1);
tcg_gen_andi_tl(reg, reg, 1);
break;
shift = (8 << size) - 1;
return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
.mask = (target_ulong)1 << shift };
case CC_OP_MULB ... CC_OP_MULQ:
tcg_gen_setcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
reg, cpu_cc_src, 0);
inv = false;
break;
return (CCPrepare) { .cond = TCG_COND_NE,
.reg = cpu_cc_src, .mask = -1 };
case CC_OP_EFLAGS:
case CC_OP_SARB ... CC_OP_SARQ:
/* CC_SRC & 1 */
tcg_gen_andi_tl(reg, cpu_cc_src, 1);
break;
return (CCPrepare) { .cond = TCG_COND_NE,
.reg = cpu_cc_src, .mask = CC_C };
default:
/* The need to compute only C from CC_OP_DYNAMIC is important
@@ -972,74 +981,104 @@ static void gen_compute_eflags_c(DisasContext *s, TCGv reg, bool inv)
gen_update_cc_op(s);
gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_env, cpu_cc_op);
tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
break;
}
if (inv) {
tcg_gen_xori_tl(reg, reg, 1);
return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
.mask = -1, .no_setcond = true };
}
}
/* compute eflags.P to reg */
static void gen_compute_eflags_p(DisasContext *s, TCGv reg)
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
gen_compute_eflags(s);
tcg_gen_shri_tl(reg, cpu_cc_src, 2);
tcg_gen_andi_tl(reg, reg, 1);
return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
.mask = CC_P };
}
/* compute eflags.S to reg */
static void gen_compute_eflags_s(DisasContext *s, TCGv reg, bool inv)
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
switch (s->cc_op) {
case CC_OP_DYNAMIC:
gen_compute_eflags(s);
/* FALLTHRU */
case CC_OP_EFLAGS:
tcg_gen_shri_tl(reg, cpu_cc_src, 7);
tcg_gen_andi_tl(reg, reg, 1);
if (inv) {
tcg_gen_xori_tl(reg, reg, 1);
}
break;
return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
.mask = CC_S };
default:
{
int size = (s->cc_op - CC_OP_ADDB) & 3;
TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
tcg_gen_setcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, reg, t0, 0);
return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
}
break;
}
}
/* compute eflags.O to reg */
static void gen_compute_eflags_o(DisasContext *s, TCGv reg)
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
gen_compute_eflags(s);
tcg_gen_shri_tl(reg, cpu_cc_src, 11);
tcg_gen_andi_tl(reg, reg, 1);
return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
.mask = CC_O };
}
/* compute eflags.Z to reg */
static void gen_compute_eflags_z(DisasContext *s, TCGv reg, bool inv)
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
switch (s->cc_op) {
case CC_OP_DYNAMIC:
gen_compute_eflags(s);
/* FALLTHRU */
case CC_OP_EFLAGS:
tcg_gen_shri_tl(reg, cpu_cc_src, 6);
tcg_gen_andi_tl(reg, reg, 1);
if (inv) {
tcg_gen_xori_tl(reg, reg, 1);
}
break;
return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
.mask = CC_Z };
default:
{
int size = (s->cc_op - CC_OP_ADDB) & 3;
TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
tcg_gen_setcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, reg, t0, 0);
return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
}
break;
}
}
#define gen_compute_eflags_c(s, reg, inv) \
gen_do_setcc(reg, gen_prepare_eflags_c(s, reg), inv)
#define gen_compute_eflags_p(s, reg) \
gen_do_setcc(reg, gen_prepare_eflags_p(s, reg), false)
#define gen_compute_eflags_s(s, reg, inv) \
gen_do_setcc(reg, gen_prepare_eflags_s(s, reg), inv)
#define gen_compute_eflags_o(s, reg) \
gen_do_setcc(reg, gen_prepare_eflags_o(s, reg), false)
#define gen_compute_eflags_z(s, reg, inv) \
gen_do_setcc(reg, gen_prepare_eflags_z(s, reg), inv)
static void gen_do_setcc(TCGv reg, struct CCPrepare cc, bool inv)
{
if (inv) {
cc.cond = tcg_invert_cond(cc.cond);
}
if (cc.no_setcond) {
if (cc.cond == TCG_COND_EQ) {
tcg_gen_xori_tl(reg, cc.reg, 1);
} else {
tcg_gen_mov_tl(reg, cc.reg);
}
return;
}
if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
tcg_gen_andi_tl(reg, reg, 1);
return;
}
if (cc.mask != -1) {
tcg_gen_andi_tl(reg, cc.reg, cc.mask);
}
if (cc.use_reg2) {
tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
} else {
tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
}
}
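
As a worked example of the single-bit fast path above (and of the ctztl note in the commit message): gen_prepare_eflags_p() returns { .cond = TCG_COND_NE, .reg = cpu_cc_src, .mask = CC_P }.  Since CC_P (0x04) is a power of two, cc.imm is 0 and use_reg2 is clear, gen_do_setcc() skips the setcond and emits the same shift/and pair the old gen_compute_eflags_p() produced by hand.  Sketch only, assuming translate.c's TCG helpers and the ctztl macro defined earlier in the patch:

    tcg_gen_shri_tl(reg, cc.reg, ctztl(CC_P));   /* ctztl(0x04) == 2 */
    tcg_gen_andi_tl(reg, reg, 1);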