diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index a83f8aab30..d268199fc1 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -244,6 +244,7 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
 #define P_VEXL          0x80000         /* Set VEX.L = 1 */
 #define P_EVEX          0x100000        /* Requires EVEX encoding */
 
+#define OPC_ARITH_EbIb  (0x80)
 #define OPC_ARITH_EvIz  (0x81)
 #define OPC_ARITH_EvIb  (0x83)
 #define OPC_ARITH_GvEv  (0x03)          /* ... plus (ARITH_FOO << 3) */
@@ -1316,23 +1317,41 @@ static void tgen_arithi(TCGContext *s, int c, int r0,
         c &= 7;
     }
 
-    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
-       partial flags update stalls on Pentium4 and are not recommended
-       by current Intel optimization manuals.  */
-    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
-        int is_inc = (c == ARITH_ADD) ^ (val < 0);
-        if (TCG_TARGET_REG_BITS == 64) {
-            /* The single-byte increment encodings are re-tasked as the
-               REX prefixes.  Use the MODRM encoding.  */
-            tcg_out_modrm(s, OPC_GRP5 + rexw,
-                          (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
-        } else {
-            tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
+    switch (c) {
+    case ARITH_ADD:
+    case ARITH_SUB:
+        if (!cf) {
+            /*
+             * ??? While INC is 2 bytes shorter than ADDL $1, they also induce
+             * partial flags update stalls on Pentium4 and are not recommended
+             * by current Intel optimization manuals.
+             */
+            if (val == 1 || val == -1) {
+                int is_inc = (c == ARITH_ADD) ^ (val < 0);
+                if (TCG_TARGET_REG_BITS == 64) {
+                    /*
+                     * The single-byte increment encodings are re-tasked
+                     * as the REX prefixes.  Use the MODRM encoding.
+                     */
+                    tcg_out_modrm(s, OPC_GRP5 + rexw,
+                                  (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
+                } else {
+                    tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
+                }
+                return;
+            }
+            if (val == 128) {
+                /*
+                 * Facilitate using an 8-bit immediate.  Carry is inverted
+                 * by this transformation, so do it only if cf == 0.
+                 */
+                c ^= ARITH_ADD ^ ARITH_SUB;
+                val = -128;
+            }
         }
-        return;
-    }
+        break;
 
-    if (c == ARITH_AND) {
+    case ARITH_AND:
         if (TCG_TARGET_REG_BITS == 64) {
             if (val == 0xffffffffu) {
                 tcg_out_ext32u(s, r0, r0);
@@ -1351,6 +1370,17 @@ static void tgen_arithi(TCGContext *s, int c, int r0,
             tcg_out_ext16u(s, r0, r0);
             return;
         }
+        break;
+
+    case ARITH_OR:
+    case ARITH_XOR:
+        if (val >= 0x80 && val <= 0xff
+            && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
+            tcg_out_modrm(s, OPC_ARITH_EbIb + P_REXB_RM, c, r0);
+            tcg_out8(s, val);
+            return;
+        }
+        break;
     }
 
     if (val == (int8_t)val) {
diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h
index bbd7b21247..cb47b29452 100644
--- a/tcg/ppc/tcg-target-con-set.h
+++ b/tcg/ppc/tcg-target-con-set.h
@@ -35,7 +35,7 @@ C_O1_I3(v, v, v, v)
 C_O1_I4(r, r, ri, rZ, rZ)
 C_O1_I4(r, r, r, ri, ri)
 C_O2_I1(r, r, r)
-C_O2_I1(o, m, r)
+C_N1O1_I1(o, m, r)
 C_O2_I2(r, r, r, r)
 C_O2_I4(r, r, rI, rZM, r, r)
 C_O2_I4(r, r, r, r, rI, rZM)
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 856c3b18f5..54816967bc 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -2595,6 +2595,7 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
         tcg_debug_assert(!need_bswap);
         tcg_debug_assert(datalo & 1);
         tcg_debug_assert(datahi == datalo - 1);
+        tcg_debug_assert(!is_ld || datahi != index);
         insn = is_ld ? LQ : STQ;
         tcg_out32(s, insn | TAI(datahi, index, 0));
     } else {
@@ -4071,7 +4072,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
 
     case INDEX_op_qemu_ld_a32_i128:
     case INDEX_op_qemu_ld_a64_i128:
-        return C_O2_I1(o, m, r);
+        return C_N1O1_I1(o, m, r);
     case INDEX_op_qemu_st_a32_i128:
     case INDEX_op_qemu_st_a64_i128:
         return C_O0_I3(o, m, r);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 896a36caeb..e2c38f6d11 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -653,6 +653,7 @@ static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
 #define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),
 
 #define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),
+#define C_N1O1_I1(O1, O2, I1)           C_PFX3(c_n1o1_i1_, O1, O2, I1),
 #define C_N2_I1(O1, O2, I1)             C_PFX3(c_n2_i1_, O1, O2, I1),
 
 #define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
@@ -676,6 +677,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
 #undef C_O1_I3
 #undef C_O1_I4
 #undef C_N1_I2
+#undef C_N1O1_I1
 #undef C_N2_I1
 #undef C_O2_I1
 #undef C_O2_I2
@@ -696,6 +698,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
 #define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },
 
 #define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },
+#define C_N1O1_I1(O1, O2, I1)           { .args_ct_str = { "&" #O1, #O2, #I1 } },
 #define C_N2_I1(O1, O2, I1)             { .args_ct_str = { "&" #O1, "&" #O2, #I1 } },
 
 #define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
@@ -718,6 +721,7 @@ static const TCGTargetOpDef constraint_sets[] = {
 #undef C_O1_I3
 #undef C_O1_I4
 #undef C_N1_I2
+#undef C_N1O1_I1
 #undef C_N2_I1
 #undef C_O2_I1
 #undef C_O2_I2
@@ -738,6 +742,7 @@ static const TCGTargetOpDef constraint_sets[] = {
 #define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)
 
 #define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)
+#define C_N1O1_I1(O1, O2, I1)           C_PFX3(c_n1o1_i1_, O1, O2, I1)
 #define C_N2_I1(O1, O2, I1)             C_PFX3(c_n2_i1_, O1, O2, I1)
 
 #define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
@@ -2988,6 +2993,7 @@ static void process_op_defs(TCGContext *s)
                         .pair = 2,
                         .pair_index = o,
                         .regs = def->args_ct[o].regs << 1,
+                        .newreg = def->args_ct[o].newreg,
                     };
                     def->args_ct[o].pair = 1;
                     def->args_ct[o].pair_index = i;
@@ -3004,6 +3010,7 @@ static void process_op_defs(TCGContext *s)
                         .pair = 1,
                         .pair_index = o,
                         .regs = def->args_ct[o].regs >> 1,
+                        .newreg = def->args_ct[o].newreg,
                     };
                     def->args_ct[o].pair = 2;
                     def->args_ct[o].pair_index = i;
@@ -5036,17 +5043,21 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
                 break;
 
             case 1: /* first of pair */
-                tcg_debug_assert(!arg_ct->newreg);
                 if (arg_ct->oalias) {
                     reg = new_args[arg_ct->alias_index];
-                    break;
+                } else if (arg_ct->newreg) {
+                    reg = tcg_reg_alloc_pair(s, arg_ct->regs,
+                                             i_allocated_regs | o_allocated_regs,
+                                             output_pref(op, k),
+                                             ts->indirect_base);
+                } else {
+                    reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
+                                             output_pref(op, k),
+                                             ts->indirect_base);
                 }
-                reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
-                                         output_pref(op, k), ts->indirect_base);
                 break;
 
             case 2: /* second of pair */
-                tcg_debug_assert(!arg_ct->newreg);
                 if (arg_ct->oalias) {
                     reg = new_args[arg_ct->alias_index];
                 } else {
diff --git a/util/cpuinfo-ppc.c b/util/cpuinfo-ppc.c
index 1ea3db0ac8..b2d8893a06 100644
--- a/util/cpuinfo-ppc.c
+++ b/util/cpuinfo-ppc.c
@@ -6,10 +6,10 @@
 #include "qemu/osdep.h"
 #include "host/cpuinfo.h"
 
+#include <asm/cputable.h>
 #ifdef CONFIG_GETAUXVAL
 # include <sys/auxv.h>
 #else
-# include <asm/cputable.h>
 # include "elf.h"
 #endif
@@ -40,7 +40,7 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
         info |= CPUINFO_V2_06;
     }
 
-    if (hwcap2 & PPC_FEATURE2_HAS_ISEL) {
+    if (hwcap2 & PPC_FEATURE2_ISEL) {
         info |= CPUINFO_ISEL;
     }
     if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
@@ -53,7 +53,7 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
              * always have both anyway, since VSX came with Power7
              * and crypto came with Power8.
              */
-            if (hwcap2 & PPC_FEATURE2_HAS_VEC_CRYPTO) {
+            if (hwcap2 & PPC_FEATURE2_VEC_CRYPTO) {
                 info |= CPUINFO_CRYPTO;
             }
         }
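
Two notes on the i386 tgen_arithi hunks. First, the new byte-sized OR/XOR form
is gated on (r0 < 4 || TCG_TARGET_REG_BITS == 64) because without a REX prefix
only AL, CL, DL and BL are byte-addressable; register numbers 4-7 would select
AH, CH, DH and BH instead. Second, both the val == 128 rewrite and the byte
immediate rest on simple value identities: for imm in [0x80, 0xff] the
sign-extending EvIb (0x83) form is unusable, so without EbIb a 4-byte EvIz
immediate was needed. A standalone sanity check of those identities (plain C,
illustration only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* add $128 and sub $-128 produce the same result; only the carry
           flag differs, which is why the patch requires cf == 0. */
        for (uint32_t x = 0; x < 0x40000; x += 0x1234) {
            assert((uint32_t)(x + 128) == (uint32_t)(x - (uint32_t)-128));
        }

        /* For imm in [0x80, 0xff], OR/XOR can only change the low byte,
           so the 8-bit EbIb form matches the 32-bit EvIz form. */
        for (uint32_t imm = 0x80; imm <= 0xff; imm++) {
            uint32_t x = 0xdeadbeef;
            assert((x | imm) == ((x & ~0xffu) | ((x & 0xff) | imm)));
            assert((x ^ imm) == ((x & ~0xffu) | ((x ^ imm) & 0xff)));
        }
        printf("ok\n");
        return 0;
    }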
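
The C_N1O1_I1 plumbing in tcg.c introduces a constraint shape whose first
output is a "new" register (must not overlap any input) while the second
output is an ordinary one. A minimal standalone sketch of what the macro
produces in the enum and constraint-string passes, mirroring the pasting and
stringifying scheme visible in the patch (the surrounding machinery is
simplified; only the names come from the patch itself):

    #include <stdio.h>

    /* Pasting helper in the same shape tcg.c uses for set names. */
    #define C_PFX3(P, A, B, C) P##A##_##B##_##C

    /* Pass 1: an enum member naming the constraint set. */
    enum { C_PFX3(c_n1o1_i1_, o, m, r) };

    /* Pass 2: the constraint strings; "&" marks the first output as
       new-register, while the second output carries no "&". */
    #define C_N1O1_I1(O1, O2, I1) { "&" #O1, #O2, #I1 }

    static const char *args_ct_str[] = C_N1O1_I1(o, m, r);

    int main(void)
    {
        printf("index: %d\n", (int)c_n1o1_i1_o_m_r);
        printf("constraints: %s %s %s\n",
               args_ct_str[0], args_ct_str[1], args_ct_str[2]);
        return 0;
    }

This prints "index: 0" and "constraints: &o m r". On PPC this is what lets
qemu_ld_i128 use lq safely: the added tcg_debug_assert documents that the
first register of the destination pair must differ from the address register,
and the new tcg_reg_alloc_pair path in tcg_reg_alloc_op enforces it by
excluding the input registers from the allocation.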
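
The util/cpuinfo-ppc.c hunks switch to the AT_HWCAP2 bit names as spelled by
the kernel's <asm/cputable.h>, now included unconditionally: PPC_FEATURE2_ISEL
and PPC_FEATURE2_VEC_CRYPTO rather than the PPC_FEATURE2_HAS_* spellings. A
minimal standalone probe of the same bits, assuming a Linux/PPC host with
getauxval (illustration only):

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/cputable.h>

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        /* The same tests the patched cpuinfo_init() performs. */
        printf("isel:       %s\n",
               hwcap2 & PPC_FEATURE2_ISEL ? "yes" : "no");
        printf("vec_crypto: %s\n",
               hwcap2 & PPC_FEATURE2_VEC_CRYPTO ? "yes" : "no");
        return 0;
    }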